Merge "clk: qcom: clk-gcc: Remove VDD mapping from gpll0 for sdx24"
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..4341e3a
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,27 @@
+cc_binary_host {
+    name: "unifdef",
+    srcs: ["scripts/unifdef.c"],
+    sanitize: {
+        never: true,
+    }
+}
+
+gensrcs {
+    name: "qseecom-kernel-includes",
+
+    // move to out/ as root for header generation because of scripts/unifdef
+    // storage - at the expense of extra ../ references
+    cmd: "pushd out && mkdir -p scripts && rm -f scripts/unifdef && ln -s ../../$(location unifdef) scripts/unifdef && ../$(location scripts/headers_install.sh) `dirname ../$(out)` ../ $(in) && popd",
+
+    tools: ["unifdef"],
+    tool_files: ["scripts/headers_install.sh"],
+    export_include_dirs: ["include/uapi"],
+    srcs: ["include/uapi/linux/qseecom.h"],
+    output_extension: "h",
+}
+
+cc_library_headers {
+    name: "qseecom-kernel-headers",
+    generated_headers: ["qseecom-kernel-includes"],
+    export_generated_headers: ["qseecom-kernel-includes"],
+}
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index dfd56ec..6d75a9c 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -355,6 +355,7 @@
 		/sys/devices/system/cpu/vulnerabilities/meltdown
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index b8d0a30..f82da9b 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -101,6 +101,7 @@
 Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
 Description:
 		 Controls the trimming rate in batch mode.
+		 <deprecated>
 
 What:		/sys/fs/f2fs/<disk>/cp_interval
 Date:		October 2015
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 1699a55..ef63996 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -112,9 +112,11 @@
 free space on the data device drops below this level then a dm event
 will be triggered which a userspace daemon should catch allowing it to
 extend the pool device.  Only one such event will be sent.
-Resuming a device with a new table itself triggers an event so the
-userspace daemon can use this to detect a situation where a new table
-already exceeds the threshold.
+
+No special event is triggered if a just resumed device's free space is below
+the low water mark. However, resuming a device always triggers an
+event; a userspace daemon should verify that free space exceeds the low
+water mark when handling this event.
 
 A low water mark for the metadata device is maintained in the kernel and
 will trigger a dm event if free space on the metadata device drops below
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 3a96610..ee9b465 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -110,6 +110,9 @@
 	* cpu: the cpu phandle this ETM/PTM is affined to. When omitted the
 	  source is considered to belong to CPU0.
 
+	* qcom,tupwr-disable: For ETM, don't keep trace unit powered across power
+	  collapse.
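+
+	  An illustrative ETM node fragment (instance name and cpu phandle are
+	  placeholders) using this flag:
+
+		etm0 {
+			cpu = <&CPU0>;
+			qcom,tupwr-disable;
+		};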
+
 * Optional property for TMC:
 
 	* arm,buffer-size: size of contiguous buffer space for TMC ETR
diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
index e63d09b..de2a963 100644
--- a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
+++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
@@ -29,6 +29,10 @@
 
 - qcom,allocate-boot-time:	Indicates whether clients needs boot time memory allocation.
 
+- qcom,allocate-on-request:	Indicates memory allocation happens only when the client requests it.
+
+/* "qcom,allocate-boot-time" and "qcom,allocate-on-request" are mutually exclusive properties. */
+
 Example:
 
 qcom,memshare {
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
new file mode 100644
index 0000000..0304035
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
@@ -0,0 +1,55 @@
+* LPM Workarounds
+
+The required properties are:
+
+- compatible: "qcom,lpm-workarounds"
+
+The optional properties are:
+- reg: The physical address and the size of the l1_l2_gcc and l2_pwr_sts
+	registers of the performance cluster.
+
+- reg-names: "l2_pwr_sts" - string to identify l2_pwr_sts physical address.
+	     "l1_l2_gcc" - string to identify l1_l2_gcc physical address.
+
+- qcom,lpm-wa-cx-turbo-unvote: Indicates the workaround to unvote CX turbo
+	vote when the system is coming out of rpm-assisted power collapse.
+	lpm-cx-supply is required if this is present.
+
+- lpm-cx-supply: Holds the handle for the CX regulator supply which is used
+	to unvote.
+
+- qcom,lpm-wa-skip-l2-spm: Due to a hardware bug on 8939 and 8909, secure
+	world needs to disable and enable L2 SPM to get the proper context
+	in secure watchdog bite cases. With this workaround there is a race
+	in programming L2 SPM between HLOS and secure world. This leads to
+	stability issues. To avoid this, program L2 SPM only in the secure world
+	based on the L2 mode flag passed. Set lpm-wa-skip-l2-spm node if this
+	is required.
+
+- qcom,lpm-wa-dynamic-clock-gating: Due to a hardware bug on 8952, L1/L2 dynamic
+	clock gating needs to be enabled by software for performance cluster
+	cores and L2. Set lpm-wa-dynamic-clock-gating node if this workaround is
+	required.
+
+- qcom,cpu-offline-mask: Dynamic clock gating should be enabled when the cluster is
+	in L2 PC. Each bit of cpu-offline-mask identifies a cpu to be hotplugged by the
+	KTM driver.
+
+- qcom,non-boot-cpu-index: Holds the index of the non-boot cluster cpu.
+
+- qcom,l1-l2-gcc-secure: Indicates that the L1/L2 clock enabling register is secure.
+
+Example:
+
+qcom,lpm-workarounds {
+	compatible = "qcom,lpm-workarounds";
+	reg = <0x0B011018 0x4>,
+	      <0x0B011088 0x4>;
+	reg-names = "l2-pwr-sts", "l1-l2-gcc";
+	lpm-cx-supply = <&pm8916_s2_corner>;
+	qcom,lpm-wa-cx-turbo-unvote;
+	qcom,lpm-wa-skip-l2-spm;
+	qcom,lpm-wa-dynamic-clock-gating;
+	qcom,cpu-offline-mask = "0xF";
+	qcom,non-boot-cpu-index = <4>;
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 9c2d647..b2640da 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -95,6 +95,9 @@
 - QCS605
   compatible = "qcom,qcs605"
 
+- SXR1120
+  compatible = "qcom,sxr1120"
+
 - SDA670
   compatible = "qcom,sda670"
 
@@ -324,8 +327,11 @@
 compatible = "qcom,sdm670-qrd"
 compatible = "qcom,qcs605-cdp"
 compatible = "qcom,qcs605-mtp"
+compatible = "qcom,sxr1120-mtp"
+compatible = "qcom,sxr1120-cdp"
 compatible = "qcom,sda670-cdp"
 compatible = "qcom,sda670-mtp"
+compatible = "qcom,sda670-hdk"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
@@ -382,4 +388,5 @@
 compatible = "qcom,sdxpoorwills-atp"
 compatible = "qcom,sdxpoorwills-mtp"
 compatible = "qcom,sdxpoorwills-cdp"
+compatible = "qcom,sdxpoorwills-ttp"
 compatible = "qcom,mdm9607-ttp"
diff --git a/Documentation/devicetree/bindings/arm/msm/pm.txt b/Documentation/devicetree/bindings/arm/msm/pm.txt
new file mode 100644
index 0000000..b66d4a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/pm.txt
@@ -0,0 +1,49 @@
+* MSM PM
+
+PM is the low power management device for MSM (Snapdragon class) chipsets.
+This device sets up different components to enter low power modes and registers with
+the kernel to be notified of idle and suspend states; when called, it follows
+a set of instructions to put the application cores into the lowest
+power mode possible.
+The PC debug counter reserves 16 registers in the IMEM memory space which maintain
+a count of the power collapse state of each core. This count is useful for
+debugging the power collapse state on each core.
+
+The required properties for PM are:
+
+- compatible: "qcom,pm"
+
+The optional properties are:
+
+- reg: physical IMEM address reserved for PC counters and the size
+- qcom,use-sync-timer: Indicates whether the target uses the synchronized QTimer.
+- qcom,synced-clocks: Indicates that all cpus run off a single clock source and that
+	the necessary clock source should be instantiated.
+- qcom,pc-resets-timer: Indicates that the timer gets reset during power collapse.
+- qcom,tz-flushes-cache: Indicates that TZ flushes all of the cache during
+power collapse. MSM PM can decide to not perform cache flush operations to
+reduce latency associated with L2 PC.
+- qcom,saw-turns-off-pll: Indicates that the CPU's PLL can be managed from SAW
+	hardware. On such targets software management of PLL is not required. If
+	this property is specified then qcom,synced-clocks would be ignored.
+- qcom,no-pll-switch-for-retention: Boolean property, to indicate that the cpu
+	clock can be sourced from the HFPLL even when the cpu is in
+	retention, and need not be switched to an always-on pll. If this flag
+	is set then the cpu clock is not ramped down when entering retention or
+	ramped up on exiting retention.
+
+Example 1:
+
+qcom,pm@fe800664 {
+		compatible = "qcom,pm";
+		reg = <0xfe800664 0x40>;
+		qcom,use-sync-timer;
+	};
+
+Example 2:
+
+qcom,pm@fe800664 {
+		compatible = "qcom,pm";
+		reg = <0xfe800664 0x40>;
+		qcom,saw-turns-off-pll;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt b/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt
new file mode 100644
index 0000000..4f7111f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt
@@ -0,0 +1,35 @@
+* MSM PM SNOC client
+
+MSM PM SNOC client device is used to set up a bus request of 100 MHz for the
+SNOC bus when the Apps cores are active. This bus request helps mitigate the
+exit latency from power collapse in cases where there aren't any active bus
+requests for SNOC.
+
+This device is dependent on the pm-8x60 device, which configures the low power
+mode of respective cores.
+
+The required properties of this device are:
+
+- compatible: qcom,pm-snoc-client
+- qcom,msm-bus,name:            String representing the client-name
+- qcom,msm-bus,num-cases:       Total number of usecases
+- qcom,msm-bus,active-only:     Boolean context flag for requests in active or
+                                dual (active & sleep) context
+- qcom,msm-bus,num-paths:       Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:    Arrays of unsigned integers representing:
+                                master-id, slave-id, arbitrated bandwidth
+                                in KBps, instantaneous bandwidth in KBps
+
+
+Example:
+	qcom,pm-snoc-client {
+		compatible = "qcom,pm-snoc-client";
+		qcom,msm-bus,name = "ocimem_snoc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>,
+			<22 512 320000 3200000>;
+	};
+
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 0589165..15f1e93 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -103,6 +103,8 @@
 - qcom,sde-dsc-size:		A u32 value indicates the address range for each dsc.
 - qcom,sde-cdm-size:		A u32 value indicates the address range for each cdm.
 - qcom,sde-pp-size:		A u32 value indicates the address range for each pingpong.
+- qcom,sde-te-source:		Array of GPIO sources indicating which pingpong TE is
+				sourced to which panel TE gpio.
 - qcom,sde-wb-size:		A u32 value indicates the address range for each writeback.
 - qcom,sde-len:			A u32 entry for SDE address range.
 - qcom,sde-intf-max-prefetch-lines:	Array of u32 values for max prefetch lines on
@@ -502,6 +504,7 @@
     qcom,sde-pp-off = <0x00071000 0x00071800
 			  0x00072000 0x00072800>;
     qcom,sde-pp-slave = <0x0 0x0 0x0 0x0>;
+    qcom,sde-te-source = <0x0 0x1 0x0 0x0>;
     qcom,sde-cdm-off = <0x0007a200>;
     qcom,sde-dsc-off = <0x00081000 0x00081400>;
     qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
index 217a90e..9c38bbe 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
@@ -11,7 +11,11 @@
   interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock, in this case the
+   name must be "core" for the first clock and "reg" for the second
+   one
+
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 24290c8..c17970c 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -521,6 +521,9 @@
 					value 0.
 - qcom,mdss-dsi-dma-schedule-line:	An integer value indicates the line number after vertical active
 					region, at which command DMA needs to be triggered.
+- qcom,mdss-dsi-panel-cmds-only-by-right: Boolean used to indicate whether the panel supports DSI1 or
+					DSI0 to send commands. If this is set, the panel only supports
+					DSI1 to send commands; otherwise DSI0 will send commands.
 
 Required properties for sub-nodes:	None
 Optional properties:
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 73a6dbc..7454863 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -285,6 +285,35 @@
 					60 = 60 frames per second (default)
 - qcom,mdss-dsi-panel-clockrate:	A 64 bit value specifies the panel clock speed in Hz.
 					0 = default value.
+- qcom,mdss-mdp-kickoff-threshold:	This property can be used to define a region
+					(in terms of scanlines) where the hardware is
+					allowed to trigger a data transfer from MDP to DSI.
+					If this property is used, the region must be defined
+					by setting two values, the low and the high thresholds:
+					<low_threshold high_threshold>
+					where the following condition must be met:
+					low_threshold < high_threshold
+					These values are used by the driver in such a way
+					that if the driver receives a request to kick off a
+					transfer (MDP to DSI), the transfer is triggered only
+					if the following condition is satisfied:
+					low_threshold < scanline < high_threshold
+					If the condition is not met, then the driver will delay
+					the transfer by the time defined in the following
+					property: "qcom,mdss-mdp-kickoff-delay".
+					So in order to use this property, the delay property
+					must be defined as well and be greater than 0.
+- qcom,mdss-mdp-kickoff-delay:	This property defines the delay in microseconds that
+					the driver will wait before triggering an MDP transfer
+					if the thresholds defined by the property above,
+					"qcom,mdss-mdp-kickoff-threshold", are not met.
+					So in order to use this property, the threshold property
+					must be defined as well. Note that this delay cannot be
+					zero and also should not be greater than the fps window,
+					i.e. for 60 fps the value should not exceed 16666 us.
 - qcom,mdss-mdp-transfer-time-us:	Specifies the dsi transfer time for command mode
 					panels in microseconds. Driver uses this number to adjust
 					the clock rate according to the expected transfer time.
@@ -634,6 +663,8 @@
 		qcom,mdss-dsi-dma-trigger = <0>;
 		qcom,mdss-dsi-panel-framerate = <60>;
 		qcom,mdss-dsi-panel-clockrate = <424000000>;
+		qcom,mdss-mdp-kickoff-threshold = <11 2430>;
+		qcom,mdss-mdp-kickoff-delay = <1000>;
 		qcom,mdss-mdp-transfer-time-us = <12500>;
 		qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
 					22 27 1e 03 04 00];
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi.txt b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
index 1934bc5..2d689d2 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
@@ -113,6 +113,7 @@
 - qcom,platform-bklight-en-gpio-invert:	Invert the gpio used to enable display back-light
 - qcom,panel-mode-gpio:			Specifies the GPIO to select video/command/single-port/dual-port
 					mode of panel through gpio when it supports these modes.
+- qcom,ext-vdd-gpio:			Specifies the GPIO to enable external VDD supply.
 - pinctrl-names:			List of names to assign mdss pin states defined in pinctrl device node
 					Refer to pinctrl-bindings.txt
 - pinctrl-<0..n>:			Lists phandles each pointing to the pin configuration node within a pin
diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-client.txt b/Documentation/devicetree/bindings/fb/mdss-spi-client.txt
new file mode 100644
index 0000000..0d5fde8
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-spi-client.txt
@@ -0,0 +1,27 @@
+Qualcomm Technologies, Inc. mdss-spi-client
+
+mdss-spi-client is used by the SPI display to send the FB data to the SPI master.
+
+Required properties:
+- compatible : should be "qcom,mdss-spi-client"
+- spi-max-frequency : Maximum SPI clocking speed of device in Hz
+
+Optional properties:
+- label: A string used to describe the controller used.
+- spi-cpol : Boolean property indicating device requires inverse
+  clock polarity (CPOL) mode
+- spi-cpha :  Empty property indicating device requires shifted
+  clock phase (CPHA) mode
+- spi-cs-high :  Empty property indicating device requires
+  chip select active high
+
+Example:
+spi@78b9000 { /* BLSP1 QUP5 */
+	qcom,mdss_spi_client {
+		reg = <0>;
+		compatible = "qcom,mdss-spi-client";
+		label = "MDSS SPI QUP5 CLIENT";
+		spi-max-frequency = <50000000>;
+	};
+};
+
diff --git a/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt
new file mode 100644
index 0000000..d46068f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-spi-panel.txt
@@ -0,0 +1,203 @@
+Qualcomm Technologies, Inc. mdss-spi-panel
+
+mdss-spi-panel is a SPI panel device which supports panels that
+are compatible with display serial interface specification.
+
+Required properties:
+- qcom,mdss-spi-panel-controller:	Specifies the phandle for the SPI controller that
+					this panel will be mapped to.
+- qcom,mdss-spi-panel-width:		Specifies panel width in pixels.
+- qcom,mdss-spi-panel-height:		Specifies panel height in pixels.
+- qcom,mdss-spi-bpp:			Specifies the panel bits per pixels.
+					3  = for rgb111
+					8  = for rgb332
+					12 = for rgb444
+					16 = for rgb565
+					18 = for rgb666
+					24 = for rgb888
+- qcom,mdss-spi-panel-destination:	A string that specifies the destination display for the panel.
+					"display_1" = DISPLAY_1
+					"display_2" = DISPLAY_2
+- qcom,mdss-spi-on-command:		A byte stream formed by multiple packets
+					byte 0: wait number of specified ms after command
+						 transmitted
+					byte 1: 8 bits length in network byte order
+					byte 3 and beyond: number byte of payload
+- qcom,mdss-spi-off-command:		A byte stream formed by multiple packets
+					byte 0: wait number of specified ms after command
+						 transmitted
+					byte 1: 8 bits length in network byte order
+					byte 3 and beyond: number byte of payload
+Optional properties:
+- qcom,mdss-spi-panel-name:		A string used as a descriptive name of the panel
+- qcom,cont-splash-enabled:		Boolean used to enable continuous splash mode.
+					If this property is specified, it is required to
+					specify the memory reserved for the splash
+					screen using the qcom,memblock-reserve binding
+					for the framebuffer device attached to the panel.
+- qcom,mdss-spi-h-back-porch:		Horizontal back porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-h-front-porch:		Horizontal front porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-h-pulse-width:		Horizontal pulse width.
+					2 = default value.
+- qcom,mdss-spi-h-sync-skew:		Horizontal sync skew value.
+					0 = default value.
+- qcom,mdss-spi-v-back-porch:		Vertical back porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-v-front-porch:		Vertical front porch value in pixels.
+					6 = default value.
+- qcom,mdss-spi-v-pulse-width:		Vertical pulse width.
+					2 = default value.
+- qcom,mdss-spi-bl-pmic-control-type:	A string that specifies the implementation of backlight
+					control for this panel.
+					"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
+					"bl_ctrl_wled" = Backlight controlled by WLED.
+					other: Unknown backlight control. (default)
+- qcom,mdss-spi-bl-min-level:		Specifies the min backlight level supported by the panel.
+					0 = default value.
+- qcom,mdss-spi-bl-max-level:		Specifies the max backlight level supported by the panel.
+					255 = default value.
+- qcom,mdss-spi-panel-framerate:	Specifies the frame rate for the panel.
+- qcom,esd-check-enabled:		Boolean used to enable ESD recovery feature.
+- qcom,mdss-spi-panel-status-check-mode:Specifies the panel status check method for ESD recovery.
+					"send_init_command" = send init code to recover panel status.
+					"reg_read" = Read register value to check the panel status.
+- qcom,mdss-spi-panel-status-reg: Unsigned 8-bit integer value that specifies the
+					panel status register address.
+- qcom,mdss-spi-panel-status-read-length: Unsigned 8-bit integer value that specifies
+					the expected read-back length of the panel register.
+- qcom,mdss-spi-panel-status-value: An unsigned 8-bit integer array that specifies the
+					values of the panel status register which are used to
+					check the panel status.
+					The size of this array is specified by
+					qcom,mdss-spi-panel-status-read-length.
+
+Example:
+&mdss_mdp {
+	spi_gc9305_qvga_cmd: qcom,mdss_spi_gc9305_qvga_cmd {
+		qcom,mdss-spi-panel-name = "gc9305 qvga command mode spi panel";
+		qcom,mdss-spi-panel-destination = "display_1";
+		qcom,mdss-spi-panel-controller = <&mdss_spi>;
+		qcom,mdss-spi-panel-framerate = <30>;
+		qcom,mdss-spi-panel-width = <240>;
+		qcom,mdss-spi-panel-height = <320>;
+		qcom,mdss-spi-h-front-porch = <79>;
+		qcom,mdss-spi-h-back-porch = <59>;
+		qcom,mdss-spi-h-pulse-width = <60>;
+		qcom,mdss-spi-v-back-porch = <10>;
+		qcom,mdss-spi-v-front-porch = <7>;
+		qcom,mdss-spi-v-pulse-width = <2>;
+		qcom,mdss-spi-h-left-border = <0>;
+		qcom,mdss-spi-h-right-border = <0>;
+		qcom,mdss-spi-v-top-border = <0>;
+		qcom,mdss-spi-v-bottom-border = <0>;
+		qcom,mdss-spi-bpp = <16>;
+		qcom,mdss-spi-on-command = [00 01 FE
+			00 01 EF
+			00 02 36 48
+			00 02 3A 05
+			00 02 35 00
+			00 03 A4 44 44
+			00 03 A5 42 42
+			00 03 AA 88 88
+			00 03 E8 12 40
+			00 03 E3 01 10
+			00 02 FF 61
+			00 02 AC 00
+			00 03 A6 2A 2A
+			00 03 A7 2B 2B
+			00 03 A8 18 18
+			00 03 A9 2A 2A
+			00 02 AD 33
+			00 02 AF 55
+			00 02 AE 2B
+			00 05 2A 00 00 00 EF
+			00 05 2B 00 00 01 3F
+			00 01 2C
+			00 07 F0 02 02 00 08 0C 10
+			00 07 F1 01 00 00 14 1D 0E
+			00 07 F2 10 09 37 04 04 48
+			00 07 F3 10 0B 3F 05 05 4E
+			00 07 F4 0D 19 17 1D 1E 0F
+			00 07 F5 06 12 13 1A 1B 0F
+			78 01 11
+			00 01 29
+			00 01 2C];
+		qcom,mdss-spi-off-command = [20 01 28
+			20 01 10];
+		qcom,mdss-spi-bl-min-level = <1>;
+		qcom,mdss-spi-bl-max-level = <4095>;
+		qcom,esd-check-enabled;
+		qcom,mdss-spi-panel-status-check-mode = "reg_read";
+		qcom,mdss-spi-panel-status-reg = /bits/ 8 <0x09>;
+		qcom,mdss-spi-panel-status-read-length = <4>;
+		qcom,mdss-spi-panel-status-value = /bits/ 8 <0x52 0x29 0x83 0x00>;
+	};
+};
+
+mdss-spi-display is a SPI interface display which supports sending frame
+data and commands to the panel, compatible with the SPI interface specification.
+
+Required properties:
+- compatible:	This property applies to SPI panels only.
+			compatible = "qcom,mdss-spi-display".
+- vdd-supply:	Phandle for vdd regulator device node.
+- vddio-supply:	Phandle for vdd-io regulator device node.
+- qcom,mdss-fb-map:	pHandle that specifies the framebuffer to which the interface is mapped.
+- qcom,mdss-mdp:	pHandle that specifies the mdss-mdp device.
+- qcom,panel-supply-entries:	A node that lists the elements of the supply used to
+				power the DSI panel. There can be more than one instance
+				of this binding, in which case the entry would be appended
+				with the supply entry index. For a detailed description
+				of the fields in the supply entry, refer to the qcom,ctrl-supply-entries
+				binding above.
+- qcom,platform-spi-dc-gpio: 	Pulling this gpio down indicates the current transfer is a command;
+				pulling it up indicates the current transfer carries parameters or pixel data.
+
+Optional properties:
+- label: A string used to describe the controller used.
+	-- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+	-- qcom,supply-min-voltage: minimum voltage level (uV)
+	-- qcom,supply-max-voltage: maximum voltage level (uV)
+	-- qcom,supply-enable-load: load drawn (uA) from enabled supply
+	-- qcom,supply-disable-load: load drawn (uA) from disabled supply
+	-- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+	-- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+	-- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+	-- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+
+Example:
+	mdss_spi: qcom,mdss_spi {
+		compatible = "qcom,mdss-spi-display";
+		label = "mdss spi panel";
+
+		qcom,mdss-fb-map = <&mdss_fb0>;
+		qcom,mdss-mdp = <&mdss_mdp>;
+		vdd-supply = <&pm8909_l17>;
+		vddio-supply = <&pm8909_l6>;
+		qcom,platform-spi-dc-gpio = <&msm_gpio 110 0>;
+
+		qcom,panel-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,panel-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdd";
+				qcom,supply-min-voltage = <2850000>;
+				qcom,supply-max-voltage = <2850000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+			};
+
+			qcom,panel-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vddio";
+				qcom,supply-min-voltage = <1800000>;
+				qcom,supply-max-voltage = <1800000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
index 87a551ba..885be72 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
@@ -24,6 +24,7 @@
  - synaptics,reset-on-state      : reset gpio active state.
  - synaptics,power-on-state      : power switch active state.
  - synaptics,ub-i2c-addr	 : microbootloader mode I2C slave address.
+ - synaptics,do-not-disable-regulators	   : If specified, regulators are not disabled/enabled during suspend/resume.
  - synaptics,cap-button-codes  : virtual key code mappings to be used.
  - synaptics,vir-button-codes  : virtual key code and the response region on panel.
  - synaptics,wakeup-gestures-en: enable wakeup gestures.
@@ -33,6 +34,7 @@
  - synaptics,reset-active-ms	   : reset active duration for controller (ms), default 100.
  - synaptics,power-delay-ms	   : power delay for controller (ms), default 100.
  - synaptics,max-y-for-2d	   : maximal y value of the panel.
+ - synaptics,bus-lpm-cur-uA	   : I2C bus idle mode current setting.
  - synaptics,swap-axes		   : specify whether to swap axes.
  - synaptics,resume-in-workqueue	: specify whether to defer the resume to workqueue.
  - clock-names			: Clock names used for secure touch. They are: "iface_clk", "core_clk"
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cda..fdc3418 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -57,6 +57,10 @@
   occupied by the redistributors. Required if more than one such
   region is present.
 
+- ignored-save-restore-irqs: Array of u32 elements, specifying the interrupts
+  which are ignored while doing gicd save/restore. A maximum of 10 elements
+  is supported at present, as illustrated below.
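+  A hypothetical fragment (interrupt numbers below are placeholders) showing how
+  this property might appear inside the GIC node:
+
+	ignored-save-restore-irqs = <38 42>;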
+
 Sub-nodes:
 
 PPI affinity can be expressed as a single "ppi-partitions" node,
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 4839df4..23e5f20 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -87,11 +87,6 @@
 		When qcom,enable-static-cb is selected, indicates which
 		iommu context banks may be used by HLOS.
 
-- qcom,hibernation-support:
-		A boolean, indicates that hibernation should be supported and
-		all secure usecases should be disabled, since they cannot be
-		restored properly.
-
 - qcom,skip-init : Disable resetting configuration for all context banks
                   during device reset.  This is useful for targets where
                   some context banks are dedicated to other execution
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
index 1a76d5d..258504e 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
@@ -116,6 +116,11 @@
   Definition: Short circuit debounce cycles for internal PWM.
 		Allowed values: 0, 8, 16 or 32.
 
+- vcc_pon-supply
+  Usage:      optional
+  Value type: <phandle>
+  Definition: PON driver regulator required to force MBG_ON
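+
+  A hypothetical usage fragment (the regulator phandle name is a placeholder):
+
+	vcc_pon-supply = <&pon_perph_reg>;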
+
 Following properties are specific only to LRA vibrators.
 
 - qcom,lra-auto-mode
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index 66eaae1..e1e486f 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -71,6 +71,11 @@
   Value type: <u32>
   Definition: CAM HW Version information.
 
+- camnoc-axi-min-ib-bw
+  Usage: optional
+  Value type: <u64>
+  Definition: Min camnoc axi bw for the given target.
+
 - regulator-names
   Usage: required
   Value type: <string>
diff --git a/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt
new file mode 100644
index 0000000..233c7cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt
@@ -0,0 +1,85 @@
+Fingerprint Cards AB. Fpc1028 driver
+
+The fpc1028 fingerprint sensor is connected to the host processor via SPI.
+The sensor generates interrupts when the user touches it.
+The host controller is expected to read data over SPI and pass the data to
+the rest of the system.
+
+This binding document describes the properties for this module.
+
+Properties:
+
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: It must be "fpc,fpc1020"
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-parent
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle of the interrupt controller which services the
+  summary interrupt.
+
+- fpc,gpio_rst
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: GPIO connected to the reset pin of fpc1028
+
+- fpc,gpio_irq
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Specifies the GPIO connected to the irq pin of fpc1028.
+
+- vcc_spi-supply
+  Usage:      required
+  Value type: <phandle>
+  Definition: The phandle of the regulator which supplies fpc1028 spi bus core.
+
+- vcc_io-supply
+  Usage:      required
+  Value type: <phandle>
+  Definition: The phandle of the regulator which supplies fpc1028 io pins.
+
+- vcc_ana-supply
+  Usage:      required
+  Value type: <phandle>
+  Definition: The phandle of the regulator which supplies fpc1028 analog circuit.
+
+- pinctrl-names:
+  Usage:      required
+  Value type: <string>
+  Definition: Pinctrl state names for each pin group configuration.
+  eg:"fpc1020_reset_reset", "fpc1020_reset_active", "fpc1020_irq_active".
+  refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
+- pinctrl-n:
+  Usage:      required
+  Value type: <string>
+  Definition: pinctrl state for each pin group
+  refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
+
+Example:
+
+	fpc1020 {
+			compatible = "fpc,fpc1020";
+			interrupt-parent = <&tlmm>;
+			interrupts = <48 0>;
+			fpc,gpio_rst = <&tlmm 124 0x0>;
+			fpc,gpio_irq = <&tlmm 48 0>;
+			vcc_spi-supply = <&pm8953_l5>;
+			vcc_io-supply  = <&pm8953_l5>;
+			vcc_ana-supply = <&pm8953_l5>;
+			fpc,enable-on-boot;
+			pinctrl-names = "fpc1020_reset_reset",
+					"fpc1020_reset_active",
+					"fpc1020_irq_active";
+			pinctrl-0 = <&msm_gpio_124>;
+			pinctrl-1 = <&msm_gpio_124_output_high>;
+			pinctrl-2 = <&msm_gpio_48>;
+		};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 8b63075..7652aa4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -87,6 +87,8 @@
                                 need to be unlocked by TZ.
 - qcom,ipa-uc-monitor-holb:   	Boolean context flag to indicate whether
                                 monitoring of holb via IPA uc is required.
+- qcom,wlan-ce-db-over-pcie: Boolean context flag to indicate whether the WLAN CE DB
+				is routed over the pcie bus or not.
 
 IPA pipe sub nodes (A2 static pipes configurations):
 
@@ -118,6 +120,10 @@
     controller phandle and "clk_ipa_clk" as macro for "iface_clk"
 - clock-names: This property shall contain the clock input names used
     by driver in same order as the clocks property.This should be "iface_clk"
+- emulator-bar0-offset: Specifies the offset, within PCIe BAR0, where
+    IPA/GSI programmable registers reside.  This property is used only
+    with the IPA/GSI emulation system, which is connected to and
+    communicated with via PCIe.
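+    For illustration only (the offset value below is a placeholder, not taken
+    from a real emulation platform):
+
+	emulator-bar0-offset = <0x01c00000>;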
 
 IPA SMMU sub nodes
 
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
index f6a7a1b..1e44686 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
@@ -109,6 +109,10 @@
 					this. If this property is not specified,
 					low battery voltage threshold will be
 					configured to 4200 mV.
+- qcom,fg-rconn-mohm:			Battery connector resistance (Rconn) in
+					milliohms. If Rconn is specified, then
+					Rslow values will be updated to account
+					for it, yielding an accurate ESR.
 - qcom,cycle-counter-en:		Boolean property which enables the cycle
 					counter feature. If this property is
 					present, then the following properties
@@ -143,6 +147,14 @@
 					battery voltage shadow and the current
 					predicted voltage in uV to initiate
 					capacity learning.
+- qcom,cl-max-limit-deciperc:		The maximum percent limit above which the
+					capacity cannot go during any capacity
+					learning cycle. This property is in
+					units of .1% increments.
+- qcom,cl-min-limit-deciperc:		The minimum percent limit below which the
+					capacity cannot go during any capacity
+					learning cycle. This property is in
+					units of .1% increments.
 - qcom,capacity-estimation-on:		A boolean property to have the fuel
 					gauge driver attempt to estimate the
 					battery capacity using battery
@@ -178,6 +190,97 @@
 					settings will be different from default.
 					Once SOC crosses 5%, ESR pulse timings
 					will be restored back to default.
+- qcom,fg-control-slope-limiter:	A boolean property to specify if SOC
+					slope limiter coefficients need to
+					be modified based on charging status
+					and battery temperature threshold.
+- qcom,fg-slope-limit-temp-threshold:	Temperature threshold in decidegC used
+					for applying the slope coefficient based
+					on charging status and battery
+					temperature. If this property is not
+					specified, a default value of 100 (10C)
+					will be applied.
+- qcom,fg-slope-limit-low-temp-chg:	When the temperature goes below the
+					specified temperature threshold and
+					battery is charging, slope coefficient
+					specified with this property will be
+					applied. If this property is not
+					specified, a default value of 45 will be
+					applied.
+- qcom,fg-slope-limit-low-temp-dischg:	Same as "qcom,fg-slope-limit-low-temp-chg"
+					except this is when the battery is
+					discharging.
+- qcom,fg-slope-limit-high-temp-chg:	When the temperature goes above the
+					specified temperature threshold and
+					battery is charging, slope coefficient
+					specified with this property will be
+					applied. If this property is not
+					specified, a default value of 2 will be
+					applied.
+- qcom,fg-slope-limit-high-temp-dischg:	Same as "qcom,fg-slope-limit-high-temp-chg"
+					except this is when the battery is
+					discharging.
+- qcom,fg-dischg-voltage-gain-ctrl:	A boolean property to specify if the
+					voltage gain needs to be modified
+					during discharging based on monotonic
+					soc.
+- qcom,fg-dischg-voltage-gain-soc:	Array of monotonic SOC threshold values
+					to change the voltage gain settings
+					during discharge. This should be defined
+					in the ascending order and in the range
+					of 0-100. Array limit is set to 3.
+					If qcom,fg-dischg-voltage-gain-ctrl is
+					set, then this property should be
+					specified to apply the gain settings.
+- qcom,fg-dischg-med-voltage-gain:	Array of voltage gain values that need
+					to be applied to medC voltage gain when
+					the monotonic SOC goes below the SOC
+					threshold specified under
+					qcom,fg-dischg-voltage-gain-soc. Array
+					limit is set to 3.
+					If qcom,fg-dischg-voltage-gain-ctrl is
+					set, then this property should be
+					specified to apply the gain setting.
+- qcom,fg-dischg-high-voltage-gain:	Array of voltage gain values that need
+					to be applied to highC voltage gain when
+					the monotonic SOC goes below the SOC
+					threshold specified under
+					qcom,fg-dischg-voltage-gain-soc. Array
+					limit is set to 3.
+					If qcom,fg-dischg-voltage-gain-ctrl is
+					set, then this property should be
+					specified to apply the gain setting.
+- qcom,fg-use-vbat-low-empty-soc:	A boolean property to specify whether
+					vbatt-low interrupt is used to handle
+					empty battery condition. If this is
+					not specified, empty battery condition
+					is detected by empty-soc interrupt.
+- qcom,fg-batt-temp-low-limit:		Battery temperature (in decidegC) low
+					limit  which will be used to validate
+					the battery temperature reading from FG.
+					If the battery temperature goes below
+					this limit, last read good temperature
+					will be notified to userspace. If this
+					limit is not specified, then the
+					default limit would be -60C.
+- qcom,fg-batt-temp-high-limit:		Battery temperature (in decidegC) high
+					limit  which will be used to validate
+					the battery temperature reading from FG.
+					If the battery temperature goes above
+					this limit, last read good temperature
+					will be notified to userspace. If this
+					limit is not specified, then the
+					default limit would be 150C.
+- qcom,fg-cc-soc-limit-pct:		Percentage of CC_SOC before resetting
+					FG and restoring the full CC_SOC value.
+- qcom,fg-restore-batt-info:		A boolean property to specify whether
+					battery parameters need to be
+					restored. If this feature is enabled,
+					then validation of the battery parameters
+					by OCV/battery SOC and the validation range
+					in percentage should be specified via
+					appropriate module parameters to make
+					it work properly.
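+
+A hypothetical fragment combining some of the properties above (all values are
+placeholders, not tuned for any real battery):
+
+	qcom,fg-rconn-mohm = <10>;
+	qcom,fg-control-slope-limiter;
+	qcom,fg-slope-limit-temp-threshold = <100>;
+	qcom,fg-slope-limit-low-temp-chg = <45>;
+	qcom,fg-slope-limit-low-temp-dischg = <45>;
+	qcom,fg-slope-limit-high-temp-chg = <2>;
+	qcom,fg-slope-limit-high-temp-dischg = <2>;
+	qcom,fg-dischg-voltage-gain-ctrl;
+	qcom,fg-dischg-voltage-gain-soc = <7 15 20>;
+	qcom,fg-dischg-med-voltage-gain = <6 3 2>;
+	qcom,fg-dischg-high-voltage-gain = <7 4 3>;
+	qcom,fg-restore-batt-info;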
 
 qcom,fg-soc node required properties:
 - reg : offset and length of the PMIC peripheral register map.
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
index afeb65d..0da71a3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
@@ -262,6 +262,45 @@
 		    capacity learning cycle. If this is not specified, then
 		    the default value is 0. Unit is in decipercentage.
 
+- qcom,esr-disable
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to disable ESR estimation. If not defined
+		    ESR estimation stays enabled for charge-cycles.
+
+- qcom,esr-discharge-enable
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to enable ESR estimation during discharge.
+		    Only valid if 'qcom,esr-disable' is not defined.
+
+- qcom,esr-qual-current-ua
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum current differential in uA to qualify an ESR
+		    reading as valid. If not defined the value defaults
+		    to 130mA.
+
+- qcom,esr-qual-vbatt-uv
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum vbatt differential in uV to qualify an ESR
+		    reading as valid. If not defined the value defaults
+		    to 7mV.
+
+- qcom,esr-disable-soc
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum battery SOC below which ESR will not be
+		    attempted by QG. If not defined the value defaults
+		    to 10%.
+
+- qcom,qg-ext-sns
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to support external-rsense based
+		    configuration.
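+
+A hypothetical fragment using the ESR qualification properties above (values
+are placeholders chosen around the documented defaults):
+
+	qcom,esr-discharge-enable;
+	qcom,esr-qual-current-ua = <130000>;
+	qcom,esr-qual-vbatt-uv = <7000>;
+	qcom,esr-disable-soc = <10>;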
+
 ==========================================================
 Second Level Nodes - Peripherals managed by QGAUGE driver
 ==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index afa8009..9de24c3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -193,6 +193,12 @@
 		to be get from these properties defined in battery profile:
 		qcom,step-chg-ranges, qcom,jeita-fcc-ranges, qcom,jeita-fv-ranges.
 
+- qcom,disable-stat-sw-override
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which, when present, disables the default software
+		override configuration of the STAT pin.
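+
+  For illustration, the flag is simply listed in the charger node:
+
+	qcom,disable-stat-sw-override;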
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index 8ee2749..65dcf59 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -201,6 +201,25 @@
 	Definition: Phandle for the VADC node, it is used to obtain USBIN_V
 		    and USBIN_I readings on PMIC632 based platform.
 
+- qcom,hw-die-temp-mitigation
+	Usage:      optional
+	Value type: bool
+	Definition: Boolean flag which when present enables h/w based thermal
+		    mitigation.
+
+- qcom,hw-connector-mitigation
+	Usage:      optional
+	Value type: bool
+	Definition: Boolean flag which when present enables h/w based
+		    connector temperature mitigation.
+
+- qcom,connector-internal-pull-kohm
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the internal pull-up configuration to be applied to
+		    connector THERM; the only valid values are (0/30/100/400).
+		    If not specified, 100K is used as the default pull-up.
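+
+A hypothetical fragment using the properties above (the pull-up value is one of
+the documented options):
+
+	qcom,hw-die-temp-mitigation;
+	qcom,hw-connector-mitigation;
+	qcom,connector-internal-pull-kohm = <100>;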
+
 =============================================
 Second Level Nodes - SMB5 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
index 4f12ec0..528c285 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
@@ -51,6 +51,18 @@
 	      stacked one after the other and thus all the charge current
 	      (FCC) flows through main. In a non-stacked configuration each
 	      charger controls the charge current (FCC) separately.
+
+- qcom,die-temp-threshold-degc
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies DIE temp threshold beyond which h/w starts mitigation.
+	      If not specified, 90 degrees centigrade is used.
+
+- qcom,hw-die-temp-mitigation
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean property to enable h/w controlled die temp mitigation.
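+
+  An illustrative fragment (the threshold shown is the documented default):
+
+	qcom,die-temp-threshold-degc = <90>;
+	qcom,hw-die-temp-mitigation;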
+
 ================================================
 Second Level Nodes - SMB1355 Charger Peripherals
 ================================================
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
index ddd90e1..2a84dd6 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
@@ -20,7 +20,8 @@
 	Value type: <string>
 	Definition: The name of the register defined in the reg property.
 		      It must have "lpg-base", "lut-base" is optional but
-		      it's required if any LPG channels support LUT mode.
+		      it's required if any LPG channels support LUT mode
+		      with a LUT module.
 
 - #pwm-cells:
 	Usage: required
@@ -30,14 +31,47 @@
 		      the PWM channel ID indexed from 0, and the second
 		      cell is the PWM default period in nanoseconds.
 
+- nvmem-names:
+	Usage: optional
+	Value type: <string>
+	Definition: The nvmem device name for the SDAM module where the LUT
+		      pattern is stored. It must be "ppg_sdam". This property
+		      is required only when LUT mode is supported with a SDAM
+		      module instead of a LUT module.
+
+- nvmem:
+	Usage: optional
+	Value type: <phandle>
+	Definition: Phandle of the nvmem device to access the LUT stored
+		      in the SDAM module. This property is required only when
+		      LUT mode is supported and the LUT pattern is stored in a
+		      SDAM module instead of a LUT module.
+
+- qcom,pbs-client
+	Usage: optional
+	Value type: <phandle>
+	Definition: Phandle of the PBS client used for sending the PBS
+		      trigger. This property is required when LUT mode is
+		      supported and the LUT pattern is stored in a SDAM
+		      module instead of a LUT module.
+
+- qcom,lut-sdam-base:
+	Usage: optional
+	Value type: <u32>
+	Definition: The register base of the LUT entries stored in SDAM. This
+		      property is required only when LUT mode is supported and
+		      the LUT pattern is stored in a SDAM module instead of a
+		      LUT module.
+
 - qcom,lut-patterns:
 	Usage: optional
 	Value type: <prop-encoded-array>
 	Definition: Duty ratios in percentages for LPG working at LUT mode.
 		      These duty ratios will be translated into PWM values
-		      and stored in LUT module. The LUT module has resource
-		      to store 47 PWM values at max and shared for all LPG
-		      channels. This property is required if any LPG channels
+		      and stored in LUT or SDAM module shared for all LPG
+		      channels. The LUT module can store at most 47 PWM
+		      values while the SDAM module can store up to 64 PWM
+		      values. This property is required if any LPG channels
 		      support LUT mode.
 
 Subnode is optional if LUT mode is not required, it's required if any LPG
@@ -54,31 +88,37 @@
 		      range is 1 - 8. Maximum value depends on the number of
 		      channels supported on PMIC.
 
+- qcom,lpg-sdam-base:
+	Usage: optional
+	Value type: <u32>
+	Definition: Register base address for LPG configuration in SDAM for
+		      the LPG channel specified under "qcom,lpg-chan-id".
+		      This property is required if LUT mode is supported with
+		      a SDAM module.
+
 - qcom,ramp-step-ms:
 	Usage: required
 	Value type: <u32>
 	Definition: The step duration in milliseconds for LPG staying at each
-		      duty specified in the LUT pattern. Allowed range is
-		      1 - 511.
+		      duty specified in the LUT pattern. Allowed range:
+		      1 - 511 when LUT module is used, and 8 - 2000 when SDAM
+		      is used.
 
 - qcom,ramp-high-index:
 	Usage: required
 	Value type: <u32>
 	Definition: The high index of the LUT pattern where LPG ends up
-		      ramping to. Allowed range is 1 - 47.
+		      ramping to. Allowed range: 1 - 47 when LUT module
+		      is used, and 1 - 64 when SDAM module is used.
 
 - qcom,ramp-low-index:
 	Usage: required
 	Value type: <u32>
 	Definition: The low index of the LUT pattern from where LPG begins
-		      ramping from. Allowed range is 0 - 46.
-
-- qcom,ramp-from-low-to-high:
-	Usage: optional
-	Value type: <empty>
-	Definition: The flag to specify the LPG ramping direction. The ramping
-		      direction is from low index to high index of the LUT
-		      pattern if it's specified.
+		      ramping from. The ramp-low-index should always be less
+		      than ramp-high-index when SDAM module is used. Allowed
+		      range: 0 - 46 when LUT module is used, and 0 - 63 when
+		      SDAM module is used.
 
 - qcom,ramp-pattern-repeat:
 	Usage: optional
@@ -86,34 +126,66 @@
 	Definition: The flag to specify if LPG would be ramping with the LUT
 		      pattern repeatedly.
 
+- qcom,ramp-from-low-to-high:
+	Usage: optional
+	Value type: <empty>
+	Definition: The flag to specify the LPG ramping direction. The ramping
+		      direction is from low index to high index of the LUT
+		      pattern if it's specified. This property is not required
+		      when SDAM module is used.
+
 - qcom,ramp-toggle:
 	Usage: optional
 	Value type: <empty>
 	Definition: The flag to specify if LPG would toggle the LUT pattern
 		      in ramping. If toggling enabled, LPG would return to the
 		      low index when high index is reached, or return to the high
-		      index when low index is reached.
+		      index when low index is reached. This property is not
+		      required when SDAM module is used.
 
 - qcom,ramp-pause-hi-count:
 	Usage: optional
 	Value type: <u32>
 	Definition: The step count that LPG stop the output when it ramped up
-		      to the high index of the LUT.
+		      to the high index of the LUT. This property is not
+		      required when SDAM module is used.
 
 - qcom,ramp-pause-lo-count:
 	Usage: optional
 	Value type: <u32>
 	Definition: The step count that LPG stop the output when it ramped up
-		      to the low index of the LUT.
-Example:
+		      to the low index of the LUT. This property is not
+		      required when SDAM module is used.
 
-	pmi8998_lpg: lpg@b100 {
+Example when LUT pattern is stored in a LUT module:
+
+	pm8150l_lpg: lpg@b100 {
 		compatible = "qcom,pwm-lpg";
 		reg = <0xb100 0x600>, <0xb000 0x100>;
 		reg-names = "lpg-base", "lut-base";
 		#pwm-cells = <2>;
 		qcom,lut-patterns = <0 14 28 42 56 70 84 100
 					100 84 70 56 42 28 14 0>;
+		lpg@1 {
+			qcom,lpg-chan-id = <1>;
+			qcom,ramp-step-ms = <200>;
+			qcom,ramp-pause-hi-count = <10>;
+			qcom,ramp-pause-lo-count = <10>;
+			qcom,ramp-low-index = <0>;
+			qcom,ramp-high-index = <15>;
+			qcom,ramp-from-low-to-high;
+			qcom,ramp-pattern-repeat;
+		};
+		lpg@2 {
+			qcom,lpg-chan-id = <2>;
+			qcom,ramp-step-ms = <200>;
+			qcom,ramp-pause-hi-count = <10>;
+			qcom,ramp-pause-lo-count = <10>;
+			qcom,ramp-low-index = <0>;
+			qcom,ramp-high-index = <15>;
+			qcom,ramp-from-low-to-high;
+			qcom,ramp-pattern-repeat;
+		};
 		lpg@3 {
 			qcom,lpg-chan-id = <3>;
 			qcom,ramp-step-ms = <200>;
@@ -124,24 +196,43 @@
 			qcom,ramp-from-low-to-high;
 			qcom,ramp-pattern-repeat;
 		};
-		lpg@4 {
-			qcom,lpg-chan-id = <4>;
+	};
+
+Example when LUT pattern is stored in a SDAM module:
+
+	pmi632_lpg: lpg@b100 {
+		compatible = "qcom,pwm-lpg";
+		reg = <0xb100 0x600>;
+		reg-names = "lpg-base";
+		#pwm-cells = <2>;
+		nvmem-names = "ppg_sdam";
+		nvmem = <&sdam7>;
+		qcom,pbs-client = <&pbs_client_3>;
+		qcom,lut-sdam-base = <0x80>;
+		qcom,lut-patterns = <0 14 28 42 56 70 84 100
+					100 84 70 56 42 28 14 0>;
+		lpg@1 {
+			qcom,lpg-chan-id = <1>;
 			qcom,ramp-step-ms = <200>;
-			qcom,ramp-pause-hi-count = <10>;
-			qcom,ramp-pause-lo-count = <10>;
 			qcom,ramp-low-index = <0>;
 			qcom,ramp-high-index = <15>;
-			qcom,ramp-from-low-to-high;
 			qcom,ramp-pattern-repeat;
+			qcom,lpg-sdam-base = <0x48>;
 		};
-		lpg@5 {
-			qcom,lpg-chan-id = <5>;
+		lpg@2 {
+			qcom,lpg-chan-id = <2>;
 			qcom,ramp-step-ms = <200>;
-			qcom,ramp-pause-hi-count = <10>;
-			qcom,ramp-pause-lo-count = <10>;
 			qcom,ramp-low-index = <0>;
 			qcom,ramp-high-index = <15>;
-			qcom,ramp-from-low-to-high;
 			qcom,ramp-pattern-repeat;
+			qcom,lpg-sdam-base = <0x56>;
+		};
+		lpg@3 {
+			qcom,lpg-chan-id = <3>;
+			qcom,ramp-step-ms = <200>;
+			qcom,ramp-low-index = <0>;
+			qcom,ramp-high-index = <15>;
+			qcom,ramp-pattern-repeat;
+			qcom,lpg-sdam-base = <0x64>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 803df6f..6fae8b0 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -61,6 +61,12 @@
 			in conjunction with "hw-ctrl-addr".
  - qcom,toggle-sw-collapse-in-disable: If set, SW_COLLAPSE bit is toggled
 			in disable call.
+ - qcom,en-few-wait-val: Input value for EN_FEW_WAIT, which controls the state
+			 transition delay after receiving the ack signal (gds_enf_ack)
+			 from the longest en_few power switch chain.
+ - qcom,en-rest-wait-val: Input value for EN_REST_WAIT, which controls the state
+			  transition delay after receiving the ack signal (gds_enr_ack)
+			  from the longest en_rest power switch chain.
 
 Example:
 	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 1e4d2c1..20cb25b 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -300,6 +300,11 @@
  - compatible : "qcom,msm-audio-apr"
 		This device is added to represent APR module.
 
+ - qcom,subsys-name: This value provides the subsystem name where the codec
+		is present. It can be "apr_modem" or "apr_adsp". This
+		property enables the apr driver to receive subsystem up/down
+		notifications from the modem/adsp.
+
 Optional properties:
 
  - compatible : "qcom,msm-audio-apr-dummy"
@@ -664,6 +669,8 @@
 		msm_audio_apr_dummy {
 			compatible = "qcom,msm-audio-apr-dummy";
 		};
+
+		qcom,subsys-name = "apr_adsp";
 	};
 
 	qcom,msm-ocmem-audio {
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
index 866d004..8b6d5a2 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -28,6 +28,7 @@
 Optional properties:
 - qcom,rt:	Specifies if the framework worker thread for this
 		controller device should have "real-time" priority.
+- qcom,disable-autosuspend: Specifies to disable runtime PM auto suspend.
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 6ff6e9b..f25691a 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -20,6 +20,7 @@
 	       should be "qcom,sdm845-tsens" for SDM845 TSENS driver.
 	       should be "qcom,tsens24xx" for 2.4 TSENS controller.
 	       should be "qcom,msm8937-tsens" for 8937 TSENS driver.
+	       should be "qcom,msm8909-tsens" for 8909 TSENS driver.
 	       The compatible property is used to identify the respective controller to use
 	       for the corresponding SoC.
 - reg : offset and length of the TSENS registers with associated property in reg-names
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 7f79f40..f075a2a 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -74,6 +74,7 @@
 			  Defaults to 26 MHz if not specified.
 - extcon:       phandle to external connector (Refer Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
 - non-removable		: defines if the connected ufs device is not removable
+- force-ufshc-probe	: Force probing of the (non-removable) UFS device even if it is not the boot device.
 
 
 Note: If above properties are not defined it can be assumed that the supply
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 5256edd..431c32a 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -189,6 +189,8 @@
 	point to external connector device, which provide "USB-HOST" cable events.
 	A single phandle may be specified if a single connector device provides
 	both "USB" and "USB-HOST" events.
+- qcom,phy-id-high-as-peripheral: If present, the device switches to device mode
+	when the PHY ID state is high, and to host mode when it is low.
 
 Example HSUSB OTG controller device node :
 	usb@f9690000 {
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index b880890..c28b05b 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -191,6 +191,8 @@
  - qcom,tune2-efuse-correction: The value to be adjusted from fused value for
    improved rise/fall times.
  - qcom,host-chirp-erratum: Indicates host chirp fix is required.
+ - qcom,override-bias-ctrl2: Indicates that the driver overrides the
+   BIAS_CTRL2 register.
  - nvmem-cells: specifies the handle to represent the SoC revision.
    usually it is defined by qfprom device node.
  - nvmem-cell-names: specifies the given nvmem cell name as defined in
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e1be5fd..a22edb5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -103,6 +103,7 @@
 fcs	Fairchild Semiconductor
 firefly	Firefly
 focaltech	FocalTech Systems Co.,Ltd
+fpc	Fingerprint Cards AB.
 friendlyarm	Guangzhou FriendlyARM Computer Tech Co., Ltd
 fsl	Freescale Semiconductor
 ge	General Electric Company
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e85f9e1..193a034 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -179,13 +179,15 @@
                        passes down hints with its policy.
 alloc_mode=%s          Adjust block allocation policy, which supports "reuse"
                        and "default".
-fsync_mode=%s          Control the policy of fsync. Currently supports "posix"
-                       and "strict". In "posix" mode, which is default, fsync
-                       will follow POSIX semantics and does a light operation
-                       to improve the filesystem performance. In "strict" mode,
-                       fsync will be heavy and behaves in line with xfs, ext4
-                       and btrfs, where xfstest generic/342 will pass, but the
-                       performance will regress.
+fsync_mode=%s          Control the policy of fsync. Currently supports "posix",
+                       "strict", and "nobarrier". In "posix" mode, which is
+                       default, fsync will follow POSIX semantics and does a
+                       light operation to improve the filesystem performance.
+                       In "strict" mode, fsync will be heavy and behaves in line
+                       with xfs, ext4 and btrfs, where xfstest generic/342 will
+                       pass, but the performance will regress. "nobarrier" is
+                       based on "posix", but doesn't issue a flush command for
+                       non-atomic files, like the "nobarrier" mount option.
 test_dummy_encryption  Enable dummy encryption, which provides a fake fscrypt
                        context. The fake fscrypt context is used by xfstests.
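A mount-option string such as fsync_mode=nobarrier above is handed to the kernel
through the data argument of mount(2). A minimal sketch (the device path and
mount point are illustrative assumptions):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* "nobarrier": POSIX-style fsync, but no flush for non-atomic files. */
        if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
                  "fsync_mode=nobarrier") < 0) {
            perror("mount");
            return 1;
        }
        return 0;
    }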
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0b8f21f..435a509 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2662,6 +2662,9 @@
 
 	noalign		[KNL,ARM]
 
+	noaltinstr	[S390] Disables alternative instructions patching
+			(CPU alternatives feature).
+
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
 			IOAPICs that may be present in the system.
 
@@ -2718,6 +2721,9 @@
 			allow data leaks with this option, which is equivalent
 			to spectre_v2=off.
 
+	nospec_store_bypass_disable
+			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -3992,6 +3998,48 @@
 			Not specifying this option is equivalent to
 			spectre_v2=auto.
 
+	spec_store_bypass_disable=
+			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
+			(Speculative Store Bypass vulnerability)
+
+			Certain CPUs are vulnerable to an exploit against a
+			common industry-wide performance optimization known
+			as "Speculative Store Bypass" in which recent stores
+			to the same memory location may not be observed by
+			later loads during speculative execution. The idea
+			is that such stores are unlikely and that they can
+			be detected prior to instruction retirement at the
+			end of a particular speculation execution window.
+
+			In vulnerable processors, the speculatively forwarded
+			store can be used in a cache side channel attack, for
+			example to read memory to which the attacker does not
+			directly have access (e.g. inside sandboxed code).
+
+			This parameter controls whether the Speculative Store
+			Bypass optimization is used.
+
+			on      - Unconditionally disable Speculative Store Bypass
+			off     - Unconditionally enable Speculative Store Bypass
+			auto    - Kernel detects whether the CPU model contains an
+				  implementation of Speculative Store Bypass and
+				  picks the most appropriate mitigation. If the
+				  CPU is not vulnerable, "off" is selected. If the
+				  CPU is vulnerable the default mitigation is
+				  architecture and Kconfig dependent. See below.
+			prctl   - Control Speculative Store Bypass per thread
+				  via prctl. Speculative Store Bypass is enabled
+				  for a process by default. The state of the control
+				  is inherited on fork.
+			seccomp - Same as "prctl" above, but all seccomp threads
+				  will disable SSB unless they explicitly opt out.
+
+			Not specifying this option is equivalent to
+			spec_store_bypass_disable=auto.
+
+			Default mitigations:
+			X86:	If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
 	spia_pedr=
diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
new file mode 100644
index 0000000..32f3d55
--- /dev/null
+++ b/Documentation/spec_ctrl.txt
@@ -0,0 +1,94 @@
+===================
+Speculation Control
+===================
+
+Quite a few CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.
+
+The kernel provides mitigation for such vulnerabilities in various
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
+
+There is also a class of mitigations which are very expensive, but they can
+be restricted to a certain set of processes or tasks in controlled
+environments. The mechanism to control these mitigations is via
+:manpage:`prctl(2)`.
+
+There are two prctl options which are related to this:
+
+ * PR_GET_SPECULATION_CTRL
+
+ * PR_SET_SPECULATION_CTRL
+
+PR_GET_SPECULATION_CTRL
+-----------------------
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+the following meaning:
+
+==== ===================== ===================================================
+Bit  Define                Description
+==== ===================== ===================================================
+0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
+                           PR_SET_SPECULATION_CTRL.
+1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
+                           disabled.
+2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
+                           enabled.
+3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+==== ===================== ===================================================
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL
+-----------------------
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of :manpage:`prctl(2)`, per task. arg3 is used to hand
+in the control value, i.e. either PR_SPEC_ENABLE, PR_SPEC_DISABLE or
+PR_SPEC_FORCE_DISABLE.
+
+Common error codes
+------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+EINVAL  The prctl is not implemented by the architecture or unused
+        prctl(2) arguments are not 0.
+
+ENODEV  arg2 selects a speculation misfeature which is not supported.
+======= =================================================================
+
+PR_SET_SPECULATION_CTRL error codes
+-----------------------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+0       Success
+
+ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ENXIO   Control of the selected speculation misfeature is not possible.
+        See PR_GET_SPECULATION_CTRL.
+
+EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and the caller
+        tried to enable it again.
+======= =================================================================
+
+Speculation misfeature controls
+-------------------------------
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
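A minimal userspace sketch of the PR_SPEC_STORE_BYPASS invocations listed above
(the constant values are assumed to match the uapi <linux/prctl.h> introduced by
this series, and are guarded here in case the toolchain headers predate them):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_GET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_PRCTL           (1UL << 0)
    #define PR_SPEC_ENABLE          (1UL << 1)
    #define PR_SPEC_DISABLE         (1UL << 2)
    #define PR_SPEC_FORCE_DISABLE   (1UL << 3)
    #endif

    int main(void)
    {
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

        if (state < 0) {
            perror("PR_GET_SPECULATION_CTRL");  /* EINVAL or ENODEV, see above */
            return 1;
        }
        if (!(state & PR_SPEC_PRCTL)) {
            fprintf(stderr, "per-task SSB control not available\n");
            return 1;
        }

        /* PR_SPEC_DISABLE turns the speculation feature off, i.e. enables
         * the mitigation for this task and its future children. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0) < 0) {
            perror("PR_SET_SPECULATION_CTRL");  /* ENXIO, ERANGE or EPERM */
            return 1;
        }
        return 0;
    }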
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1f5eab4..e46c14f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2118,6 +2118,9 @@
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+ARM firmware pseudo-registers have the following bit pattern:
+  0x4030 0000 0014 <regno:16>
+
 
 arm64 registers are mapped using the lower 32 bits. The upper 16 of
 that is the register group type, or coprocessor number:
@@ -2134,6 +2137,9 @@
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+arm64 firmware pseudo-registers have the following bit pattern:
+  0x6030 0000 0014 <regno:16>
+
 
 MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
 the register group type:
@@ -2656,7 +2662,8 @@
 	  and execute guest code when KVM_RUN is called.
 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+          backward compatible with v0.2) for the CPU.
 	  Depends on KVM_CAP_ARM_PSCI_0_2.
 	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
 	  Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 0000000..aafdab8
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+  - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+    (and thus has already been initialized)
+  - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+    highest PSCI version implemented by KVM and compatible with v0.2)
+  - Allows any PSCI version implemented by KVM and compatible with
+    v0.2 to be set with SET_ONE_REG
+  - Affects the whole VM (even if the register view is per-vcpu)
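A hedged sketch of reading that register from userspace through GET_ONE_REG.
The register id below is simply arm64 firmware pseudo-register 0 built from the
0x6030 0000 0014 <regno:16> pattern documented in api.txt above (assumed to be
KVM_REG_ARM_PSCI_VERSION), and vcpu_fd is assumed to be a vCPU file descriptor
obtained via KVM_CREATE_VCPU:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* arm64 firmware pseudo-register 0 (assumption: KVM_REG_ARM_PSCI_VERSION). */
    #define REG_ARM64_FW_PSCI_VERSION 0x6030000000140000ULL

    static int get_psci_version(int vcpu_fd, uint64_t *version)
    {
        struct kvm_one_reg reg = {
            .id   = REG_ARM64_FW_PSCI_VERSION,
            .addr = (uint64_t)(uintptr_t)version,
        };

        /* Returns 0 on success; fails if the vcpu lacks KVM_ARM_VCPU_PSCI_0_2. */
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }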
diff --git a/Makefile b/Makefile
index bf5ea11..6fac39b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 96
+SUBLEVEL = 106
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794..5647469 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,18 +29,10 @@
 	:	"r" (uaddr), "r"(oparg)				\
 	:	"memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -66,17 +58,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
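The hunk above (and the matching arch/arc change further down) strips the
encoded_op decoding and the cmp switch out of the arch helper, which now only
performs the atomic read-modify-write and reports the old value. A sketch of the
generic caller that is expected to do the decode and comparison once, built only
from the symbols visible in the removed lines (a sketch of the split, not a copy
of the common-code implementation):

    /* Generic side: decode encoded_op, call the arch hook, then compare. */
    static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
    {
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (encoded_op << 8) >> 20;   /* sign-extended, as before */
        int cmparg = (encoded_op << 20) >> 20;  /* sign-extended, as before */
        int oldval, ret;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
            oparg = 1 << oparg;

        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
            return -EFAULT;

        ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
        if (ret)
            return ret;

        switch (cmp) {
        case FUTEX_OP_CMP_EQ: return oldval == cmparg;
        case FUTEX_OP_CMP_NE: return oldval != cmparg;
        case FUTEX_OP_CMP_LT: return oldval <  cmparg;
        case FUTEX_OP_CMP_GE: return oldval >= cmparg;
        case FUTEX_OP_CMP_LE: return oldval <= cmparg;
        case FUTEX_OP_CMP_GT: return oldval >  cmparg;
        default:              return -ENOSYS;
        }
    }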
 
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 0ca9724..7081e52 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -11,6 +11,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -18,6 +22,7 @@
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -42,6 +47,7 @@
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -66,6 +72,7 @@
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -86,6 +93,7 @@
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -127,10 +135,12 @@
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -138,6 +148,7 @@
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -149,8 +160,8 @@
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
@@ -165,6 +176,7 @@
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -176,8 +188,8 @@
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
@@ -192,6 +204,7 @@
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -199,8 +212,8 @@
 	"	mov %4,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
@@ -215,6 +228,7 @@
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -222,8 +236,8 @@
 	"	mov %4,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
+		__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 249e101..b7b78cb 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -495,7 +495,6 @@
 
 config ARC_EMUL_UNALIGNED
 	bool "Emulate unaligned memory access (userspace only)"
-	default N
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	depends on ISA_ARCOMPACT
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f..eb887dd 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
 
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
-
 #ifndef CONFIG_ARC_HAS_LLSC
 	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
 #endif
@@ -118,30 +109,9 @@
 	preempt_enable();
 #endif
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 74dd21b..c51b88e 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -146,8 +146,8 @@
 
 		i2s: i2s@7e203000 {
 			compatible = "brcm,bcm2835-i2s";
-			reg = <0x7e203000 0x20>,
-			      <0x7e101098 0x02>;
+			reg = <0x7e203000 0x24>;
+			clocks = <&clocks BCM2835_CLOCK_PCM>;
 
 			dmas = <&dma 2>,
 			       <&dma 3>;
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index a1658d0..cf0de77 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -49,7 +49,7 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x60000000 0x80000000>;
+		reg = <0x60000000 0x20000000>;
 	};
 
 	gpio-restart {
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 47c9554..2b9c2be 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -88,7 +88,6 @@
 		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
-		lrclk-strength = <3>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 58b09bf..2051306 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -213,37 +213,37 @@
 &iomuxc {
 	pinctrl_enet1: enet1grp {
 		fsl,pins = <
-			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x3
-			MX7D_PAD_SD2_WP__ENET1_MDC			0x3
-			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x1
-			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x1
-			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x1
-			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x1
-			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x1
-			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x1
-			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x1
-			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x1
-			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x1
-			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x1
-			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x1
-			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x1
+			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x30
+			MX7D_PAD_SD2_WP__ENET1_MDC			0x30
+			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x11
+			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x11
+			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x11
+			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x11
+			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x11
+			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x11
+			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x11
+			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x11
+			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x11
+			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x11
+			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x11
+			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x11
 		>;
 	};
 
 	pinctrl_enet2: enet2grp {
 		fsl,pins = <
-			MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC		0x1
-			MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0		0x1
-			MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1		0x1
-			MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2		0x1
-			MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3		0x1
-			MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL		0x1
-			MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC		0x1
-			MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0		0x1
-			MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1		0x1
-			MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2		0x1
-			MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3		0x1
-			MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL		0x1
+			MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC		0x11
+			MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0		0x11
+			MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1		0x11
+			MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2		0x11
+			MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3		0x11
+			MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL		0x11
+			MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC		0x11
+			MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0		0x11
+			MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1		0x11
+			MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2		0x11
+			MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3		0x11
+			MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL		0x11
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index e6af69d..824eefa 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -3,6 +3,7 @@
 	sdxpoorwills-cdp.dtb \
 	sdxpoorwills-mtp.dtb \
 	sdxpoorwills-atp.dtb \
+	sdxpoorwills-ttp.dtb \
 	sdxpoorwills-cdp-256.dtb \
 	sdxpoorwills-mtp-256.dtb \
 	sdxpoorwills-dualwifi-cdp.dtb \
diff --git a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
index 6a3210c..d891a4b 100644
--- a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
+++ b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
@@ -71,6 +71,7 @@
 
 	audio_apr: qcom,msm-audio-apr {
 		compatible = "qcom,msm-audio-apr";
+		qcom,subsys-name = "apr_modem";
 	};
 
 	host_pcm: qcom,msm-voice-host-pcm {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
index 6909ef5..6c6e640 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
@@ -28,3 +28,7 @@
 &cnss_sdio {
 	status = "okay";
 };
+
+&blsp1_uart2b_hs {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
index d447724..16f933f 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
@@ -36,3 +36,11 @@
 	/delete-property/ qcom,devfreq,freq-table;
 	/delete-property/ cd-gpios;
 };
+
+&soc {
+	bluetooth: bt_qca6174 {
+		compatible = "qca,qca6174";
+		qca,bt-reset-gpio = <&pmxpoorwills_gpios 4 0>; /* BT_EN */
+		qca,bt-vdd-pa-supply = <&vreg_wlan>;
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts
index 5fd7042..243fdaf 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dts
@@ -28,3 +28,7 @@
 &cnss_sdio {
 	status = "okay";
 };
+
+&blsp1_uart2b_hs {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi
index b60225a..252fb04 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-mtp.dtsi
@@ -36,3 +36,11 @@
 	/delete-property/ qcom,devfreq,freq-table;
 	/delete-property/ cd-gpios;
 };
+
+&soc {
+	bluetooth: bt_qca6174 {
+		compatible = "qca,qca6174";
+		qca,bt-reset-gpio = <&pmxpoorwills_gpios 4 0>; /* BT_EN */
+		qca,bt-vdd-pa-supply = <&vreg_wlan>;
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
index dafd0b8..1428f37 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
@@ -18,7 +18,8 @@
 
 &usb {
 	status = "okay";
-	extcon = <&vbus_detect>;
+	qcom,connector-type-uAB;
+	extcon = <0>, <0>, <0>, <&vbus_detect>;
 };
 
 &pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
index 1428f37..81877b7 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
@@ -16,12 +16,6 @@
 	status = "okay";
 };
 
-&usb {
-	status = "okay";
-	qcom,connector-type-uAB;
-	extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
 &pcie_ep {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
index 6c5f3c3..d7c0d13 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
@@ -18,7 +18,8 @@
 
 &usb {
 	status = "okay";
-	extcon = <&vbus_detect>;
+	qcom,connector-type-uAB;
+	extcon = <0>, <0>, <0>, <&vbus_detect>;
 };
 
 &pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
index d7c0d13..6ceac6e 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
@@ -16,12 +16,6 @@
 	status = "okay";
 };
 
-&usb {
-	status = "okay";
-	qcom,connector-type-uAB;
-	extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
 &pcie_ep {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dts
new file mode 100644
index 0000000..775be96
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdxpoorwills-ttp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDXPOORWILLS TTP";
+	compatible = "qcom,sdxpoorwills-ttp",
+		"qcom,sdxpoorwills", "qcom,ttp";
+	qcom,board-id = <30 0x100>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dtsi
new file mode 100644
index 0000000..7f49b6d
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ttp.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxpoorwills-mtp.dtsi"
+
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index 3bccd8a..9f74227 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -76,6 +76,7 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			snps,xhci-imod-value = <4000>;
 		};
 
 		qcom,usbbam@a704000 {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 50bbebf..eefea0f 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -77,7 +77,7 @@
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
-			size = <0 0x2400000>;
+			size = <0x400000>;
 		};
 	};
 
@@ -920,6 +920,7 @@
 		qcom,mhi-event-ring-id-limits = <9 10>; /* start and end */
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,use-ipa-pm;
+		qcom,wlan-ce-db-over-pcie;
 		qcom,arm-smmu;
 		qcom,smmu-fast-map;
 		qcom,bandwidth-vote-for-ipa;
@@ -933,24 +934,24 @@
 			<1 676 0 0>,
 			<143 777 0 0>,
 		/* SVS2 */
-			<90 512 3616000 7232000>,
+			<90 512 900000 1800000>,
 			<90 585 300000 600000>,
-			<1 676 90000 180000>, /*gcc_config_noc_clk_src */
+			<1 676 90000 179000>, /*gcc_config_noc_clk_src */
 			<143 777 0 120>, /* IB defined for IPA2X_clk in MHz*/
 		/* SVS */
-			<90 512 6640000 13280000>,
+			<90 512 1530000 3060000>,
 			<90 585 400000 800000>,
-			<1 676 100000 200000>,
+			<1 676 100000 199000>,
 			<143 777 0 250>, /* IB defined for IPA2X_clk in MHz*/
 		/* NOMINAL */
-			<90 512 10400000 20800000>,
+			<90 512 2592000 5184000>,
 			<90 585 800000 1600000>,
-			<1 676 200000 400000>,
+			<1 676 200000 399000>,
 			<143 777 0 440>, /* IB defined for IPA2X_clk in MHz*/
 		/* TURBO */
-			<90 512 10400000 20800000>,
+			<90 512 2592000 5184000>,
 			<90 585 960000 1920000>,
-			<1 676 266000 532000>,
+			<1 676 266000 531000>,
 			<143 777 0 500>; /* IB defined for IPA clk in MHz*/
 		qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL",
 		"TURBO";
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6761d11..db0239c 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -428,7 +428,7 @@
 		      "dclkin.0", "dclkin.1";
 
 	ports {
-		port@1 {
+		port@0 {
 			endpoint {
 				remote-endpoint = <&adv7511_in>;
 			};
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9f48141..f0702d8 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -759,7 +759,7 @@
 		timer@fffec600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0xfffec600 0x100>;
-			interrupts = <1 13 0xf04>;
+			interrupts = <1 13 0xf01>;
 			clocks = <&mpu_periph_clk>;
 		};
 
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index 9f5001f..e21e912 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -31,13 +31,15 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_OPROFILE=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_ARCH_MMAP_RND_BITS=16
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,9 +56,11 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -145,6 +149,7 @@
 CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -263,6 +268,7 @@
 CONFIG_CNSS_SDIO=y
 CONFIG_CLD_HL_SDIO_CORE=y
 CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
@@ -272,6 +278,8 @@
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_DIAG_USES_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
@@ -297,11 +305,17 @@
 CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -321,6 +335,16 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISP_V1=y
+CONFIG_MSM_ISPIF=y
+CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -345,11 +369,16 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
 CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -357,6 +386,9 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
@@ -372,8 +404,7 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
@@ -389,6 +420,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
@@ -396,7 +429,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
-CONFIG_QCOM_PM=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -411,6 +444,7 @@
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
 CONFIG_MSM_GLINK=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
 CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_MSM_GLINK_SPI_XPRT=y
@@ -422,9 +456,12 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
 CONFIG_CNSS_CRYPTO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
@@ -445,6 +482,7 @@
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
@@ -469,8 +507,13 @@
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
@@ -485,13 +528,14 @@
 CONFIG_SECURITY_SMACK=y
 CONFIG_SECURITY_APPARMOR=y
 CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_CRYPTO_CTR=y
-CONFIG_CRYPTO_XTS=y
 CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_ARM_CRYPTO=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index c8087ad..8c3b56b 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -27,11 +27,14 @@
 CONFIG_SCHED_TUNE=y
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
@@ -47,6 +50,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -138,6 +142,7 @@
 CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -257,16 +262,26 @@
 CONFIG_CNSS_SDIO=y
 CONFIG_CLD_HL_SDIO_CORE=y
 CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
 CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_STMVL53L0X=y
 CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_DIAG_USES_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
@@ -299,6 +314,7 @@
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -316,6 +332,16 @@
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISP_V1=y
+CONFIG_MSM_ISPIF=y
+CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -340,11 +366,18 @@
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
 CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -352,7 +385,12 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
@@ -367,8 +405,7 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
@@ -384,6 +421,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
@@ -391,7 +430,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
-CONFIG_QCOM_PM=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -406,6 +445,7 @@
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
 CONFIG_MSM_GLINK=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
 CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_MSM_GLINK_SPI_XPRT=y
@@ -417,10 +457,16 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
 CONFIG_CNSS_CRYPTO=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_IIO=y
+CONFIG_INV_ICM20602_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_QTI_MPM=y
@@ -434,13 +480,16 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
@@ -465,6 +514,7 @@
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 # CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
@@ -495,8 +545,13 @@
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
@@ -514,10 +569,13 @@
 CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XTS=y
 CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
 CONFIG_ARM_CRYPTO=y
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index 5a56d63..b2e4be0 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -1,6 +1,5 @@
 # CONFIG_FHANDLE is not set
 CONFIG_AUDIT=y
-# CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
@@ -28,12 +27,17 @@
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
 CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -182,11 +186,8 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
@@ -225,19 +226,15 @@
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_SCSI_UFSHCD=y
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
 CONFIG_TUN=y
@@ -295,11 +292,17 @@
 CONFIG_QPNP_SMB2=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -326,30 +329,15 @@
 CONFIG_FB_MSM=y
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_SPI_PANEL=y
 CONFIG_FB_MSM_MDSS_MDP3=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -357,9 +345,6 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_EEM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
@@ -411,7 +396,6 @@
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_PM=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -438,10 +422,12 @@
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_SYSMON_COMM=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_MSM_EVENT_TIMER=y
+CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MSM_BAM_DMUX=y
 CONFIG_MSM_GLINK_BGCOM_XPRT=y
@@ -474,28 +460,21 @@
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_FRAME_WARN=2048
-CONFIG_PAGE_OWNER=y
-CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_PAGE_EXTENSION=y
+CONFIG_PANIC_ON_RECURSIVE_FAULT=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
-CONFIG_LKDTM=y
-CONFIG_PANIC_ON_DATA_CORRUPTION=y
-# CONFIG_ARM_UNWIND is not set
-CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index af47269..8108661 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -6,7 +6,6 @@
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
 CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
@@ -18,7 +17,6 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
-CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -29,12 +27,17 @@
 CONFIG_SCHED_TUNE=y
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
 CONFIG_CC_STACKPROTECTOR_REGULAR=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,11 +53,11 @@
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
 CONFIG_SECCOMP=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -65,12 +68,10 @@
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
-CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -111,14 +112,13 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -128,7 +128,6 @@
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_HELPER=y
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
@@ -150,6 +149,7 @@
 CONFIG_NETFILTER_XT_MATCH_TIME=y
 CONFIG_NETFILTER_XT_MATCH_U32=y
 CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_DUP_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_MATCH_AH=y
 CONFIG_IP_NF_MATCH_ECN=y
@@ -168,14 +168,13 @@
 CONFIG_IP_NF_ARPFILTER=y
 CONFIG_IP_NF_ARP_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_DUP_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_RPFILTER=y
 CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
 CONFIG_L2TP=y
 CONFIG_L2TP_DEBUGFS=y
 CONFIG_L2TP_V3=y
@@ -185,8 +184,6 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
@@ -199,8 +196,6 @@
 CONFIG_NET_EMATCH_TEXT=y
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_GACT=y
-CONFIG_NET_ACT_MIRRED=y
-CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_RMNET_DATA=y
 CONFIG_RMNET_DATA_FC=y
@@ -219,26 +214,11 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
 CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PLATFORM=y
-CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
@@ -262,7 +242,6 @@
 CONFIG_CLD_LL_CORE=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
@@ -303,11 +282,17 @@
 CONFIG_QPNP_SMB2=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -334,34 +319,19 @@
 CONFIG_FB_MSM=y
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_SPI_PANEL=y
 CONFIG_FB_MSM_MDSS_MDP3=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_DWC3=y
-CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -369,15 +339,10 @@
 CONFIG_USB_CI13XXX_MSM=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
 CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
 CONFIG_USB_CONFIGFS_RMNET_BAM=y
-CONFIG_USB_CONFIGFS_EEM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
@@ -386,7 +351,6 @@
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_RING_BUFFER=y
@@ -425,7 +389,6 @@
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_PM=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -452,6 +415,7 @@
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_SYSMON_COMM=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
@@ -478,37 +442,29 @@
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=6
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_FRAME_WARN=2048
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
-CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
-CONFIG_DEBUG_OBJECTS=y
-CONFIG_DEBUG_OBJECTS_FREE=y
-CONFIG_DEBUG_OBJECTS_TIMERS=y
-CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
 CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
@@ -518,7 +474,8 @@
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_DETECT_HUNG_TASK is not set
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
@@ -546,8 +503,13 @@
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index b113ebd..3163111 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -17,8 +17,11 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
@@ -64,6 +67,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -74,7 +78,6 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -145,6 +148,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -306,6 +310,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_TOUCHSCREEN_FT5X06=y
 CONFIG_TOUCHSCREEN_GEN_VKEYS=y
 CONFIG_INPUT_MISC=y
@@ -342,10 +350,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -354,12 +362,22 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -382,9 +400,7 @@
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
 CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
 CONFIG_MSM_CAMERA_SENSOR=y
 CONFIG_MSM_CPP=y
 CONFIG_MSM_CCI=y
@@ -550,7 +566,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -559,17 +574,18 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
-# CONFIG_MSM_JTAGV8 is not set
 CONFIG_MSM_BAM_DMUX=y
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -582,6 +598,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -605,7 +622,6 @@
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
-CONFIG_CORESIGHT_SOURCE_ETM4X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
 CONFIG_CORESIGHT_QCOM_REPLICATOR=y
@@ -616,16 +632,17 @@
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_LZ4=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index 1cccfd3..87f915c 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -18,8 +18,11 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
@@ -67,6 +70,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -77,7 +81,6 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -149,6 +152,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -311,6 +315,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_TOUCHSCREEN_FT5X06=y
 CONFIG_TOUCHSCREEN_GEN_VKEYS=y
 CONFIG_INPUT_MISC=y
@@ -349,10 +357,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -361,12 +369,22 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -541,14 +559,15 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
 CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_SECURE_BUFFER=y
@@ -566,7 +585,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -580,15 +598,18 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_QTI_MPM=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
@@ -598,6 +619,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -619,13 +641,14 @@
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
 CONFIG_DEBUG_OBJECTS_WORK=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
@@ -660,6 +683,7 @@
 CONFIG_MEMTEST=y
 CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_USER=y
+CONFIG_FORCE_PAGES=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
@@ -675,16 +699,17 @@
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_LZ4=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm/configs/msm8953-batcam-perf_defconfig b/arch/arm/configs/msm8953-batcam-perf_defconfig
index 1610d29..a6fe9b0 100644
--- a/arch/arm/configs/msm8953-batcam-perf_defconfig
+++ b/arch/arm/configs/msm8953-batcam-perf_defconfig
@@ -40,6 +40,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8953_BOOT_ORDERING=y
 # CONFIG_VDSO is not set
 CONFIG_SMP=y
 CONFIG_SCHED_MC=y
@@ -66,6 +67,7 @@
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_HIBERNATION=y
 CONFIG_HIBERNATION_IMAGE_REUSE=y
+CONFIG_HIBERNATION_SKIP_CRC=y
 CONFIG_PM_STD_PARTITION="/dev/mmcblk0p49"
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
@@ -75,10 +77,13 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_DMA_CMA=y
+# CONFIG_OF_KOBJ is not set
 CONFIG_QSEECOM=y
+CONFIG_SCSI=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_NETDEVICES=y
+CONFIG_USB_USBNET=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -141,16 +146,46 @@
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_MMC=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
@@ -177,6 +212,7 @@
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
 CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
 CONFIG_UIO=y
 CONFIG_UIO_MSM_SHAREDMEM=y
 CONFIG_STAGING=y
@@ -190,7 +226,7 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_USB_BAM=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
@@ -213,7 +249,6 @@
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
@@ -228,6 +263,7 @@
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON_USB_GPIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm/configs/msm8953-batcam_defconfig b/arch/arm/configs/msm8953-batcam_defconfig
index 1ba9d96..cd86b01 100644
--- a/arch/arm/configs/msm8953-batcam_defconfig
+++ b/arch/arm/configs/msm8953-batcam_defconfig
@@ -39,6 +39,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8953_BOOT_ORDERING=y
 # CONFIG_VDSO is not set
 CONFIG_SMP=y
 CONFIG_SCHED_MC=y
@@ -65,6 +66,7 @@
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_HIBERNATION=y
 CONFIG_HIBERNATION_IMAGE_REUSE=y
+CONFIG_HIBERNATION_SKIP_CRC=y
 CONFIG_PM_STD_PARTITION="/dev/mmcblk0p49"
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
@@ -74,10 +76,13 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_DMA_CMA=y
+# CONFIG_OF_KOBJ is not set
 CONFIG_QSEECOM=y
+CONFIG_SCSI=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_NETDEVICES=y
+CONFIG_USB_USBNET=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -142,16 +147,46 @@
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_MMC=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
@@ -178,6 +213,7 @@
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
 CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
 CONFIG_UIO=y
 CONFIG_UIO_MSM_SHAREDMEM=y
 CONFIG_STAGING=y
@@ -191,7 +227,7 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_USB_BAM=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
@@ -214,7 +250,6 @@
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
@@ -229,6 +264,7 @@
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON_USB_GPIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index aa557b0..848cfce 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -17,6 +17,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -63,6 +64,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -144,6 +146,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -344,10 +347,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -369,7 +372,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -571,6 +576,7 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
@@ -578,12 +584,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -594,6 +602,9 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -616,21 +627,24 @@
 CONFIG_IPC_LOGGING=y
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index f38341d..64b848b 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -18,6 +18,7 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -66,6 +67,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_ARM_MODULE_PLTS=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_ZSMALLOC=y
@@ -148,6 +150,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -351,10 +354,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -376,7 +379,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -557,14 +562,15 @@
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
 CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_SECURE_BUFFER=y
@@ -587,6 +593,7 @@
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_MEM_SHARE_QMI_SERVICE=y
@@ -594,12 +601,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -610,6 +619,9 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -631,6 +643,8 @@
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
@@ -672,24 +686,29 @@
 CONFIG_MEMTEST=y
 CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_USER=y
+CONFIG_FORCE_PAGES=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_LSM_MMAP_MIN_ADDR=4096
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 14c2a7c..cab3796 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -32,7 +32,9 @@
 CONFIG_CMA=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
@@ -407,9 +409,11 @@
 CONFIG_PANIC_TIMEOUT=5
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
 CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
@@ -421,6 +425,7 @@
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 # CONFIG_SECURITY_SELINUX_AVC_STATS is not set
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index b259dc7..bd429ee 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -34,7 +34,9 @@
 CONFIG_CMA=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
@@ -155,6 +157,8 @@
 CONFIG_RMNET_DATA=y
 CONFIG_RMNET_DATA_FC=y
 CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_DEBUGFS=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
@@ -226,7 +230,6 @@
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=m
-CONFIG_SLIMBUS=y
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PTP_1588_CLOCK=y
@@ -295,6 +298,7 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_MSM_SSPHY_QMP=y
 CONFIG_MSM_HSUSB_PHY=y
@@ -416,6 +420,7 @@
 CONFIG_PANIC_ON_RECURSIVE_FAULT=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
@@ -430,6 +435,7 @@
 CONFIG_PREEMPT_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_SOURCE_ETM3X=y
@@ -446,10 +452,9 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 # CONFIG_SECURITY_SELINUX_AVC_STATS is not set
-CONFIG_CRYPTO_CMAC=y
-CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCRYPTO=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a808829..4d1065c 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -242,6 +242,15 @@
 	writel_relaxed((u32)(val >> 32), addr + 4);
 }
 
+static inline u64 gic_read_irouter(const volatile void __iomem *addr)
+{
+	u64 val;
+
+	val = readl_relaxed(addr);
+	val |= (u64)readl_relaxed(addr + 4) << 32;
+	return val;
+}
+
 static inline u64 gic_read_typer(const volatile void __iomem *addr)
 {
 	u64 val;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 12f99fd..3aed449 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -534,4 +534,14 @@
 #endif
 	.endm
 
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)				\
+	.pushsection "_kprobe_blacklist", "aw" ;	\
+	.balign 4 ;					\
+	.long entry;					\
+	.popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/etmv4x.h b/arch/arm/include/asm/etmv4x.h
new file mode 100644
index 0000000..7ad0a92
--- /dev/null
+++ b/arch/arm/include/asm/etmv4x.h
@@ -0,0 +1,387 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+
+/* 32 bit register read for AArch32 */
+#define trc_readl(reg)			RSYSL_##reg()
+#define trc_readq(reg)			RSYSL_##reg()
+
+/* 32 bit register write for AArch32 */
+#define trc_write(val, reg)		WSYS_##reg(val)
+
+#define MRC(op0, op1, crn, crm, op2)					    \
+({									    \
+uint32_t val;								    \
+asm volatile("mrc p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val;									    \
+})
+
+#define MCR(val, op0, op1, crn, crm, op2)				    \
+({									    \
+asm volatile("mcr p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1()		MRC(15, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val)		MCR(val, 15, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS()		MRC(14, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR()		MRC(14, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR()		MRC(14, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0()		MRC(14, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0()		MRC(14, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1()		MRC(14, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2()		MRC(14, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3()		MRC(14, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0()		MRC(14, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1()		MRC(14, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2()		MRC(14, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3()		MRC(14, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0()		MRC(14, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1()		MRC(14, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2()		MRC(14, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3()		MRC(14, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR()		MRC(14, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH()		MRC(14, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID()		MRC(14, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R()		MRC(14, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R()		MRC(14, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR()		MRC(14, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0()			MRC(14, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1()			MRC(14, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10()		MRC(14, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11()		MRC(14, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12()		MRC(14, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13()		MRC(14, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2()			MRC(14, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3()			MRC(14, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4()			MRC(14, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5()			MRC(14, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6()			MRC(14, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7()			MRC(14, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8()			MRC(14, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9()			MRC(14, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0()		MRC(14, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR()		MRC(14, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR()		MRC(14, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10()		MRC(14, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11()		MRC(14, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12()		MRC(14, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13()		MRC(14, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14()		MRC(14, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15()		MRC(14, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2()		MRC(14, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3()		MRC(14, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4()		MRC(14, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5()		MRC(14, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6()		MRC(14, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7()		MRC(14, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8()		MRC(14, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9()		MRC(14, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16()		MRC(14, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17()		MRC(14, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18()		MRC(14, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19()		MRC(14, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20()		MRC(14, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21()		MRC(14, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22()		MRC(14, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23()		MRC(14, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24()		MRC(14, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25()		MRC(14, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26()		MRC(14, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27()		MRC(14, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28()		MRC(14, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29()		MRC(14, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30()		MRC(14, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31()		MRC(14, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0()		MRC(14, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1()		MRC(14, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2()		MRC(14, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR()		MRC(14, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR()		MRC(14, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR()		MRC(14, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR()		MRC(14, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR()		MRC(14, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR()		MRC(14, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR()		MRC(14, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR()		MRC(14, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR()		MRC(14, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR()		MRC(14, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0()		MRC(14, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1()		MRC(14, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2()		MRC(14, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3()		MRC(14, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4()		MRC(14, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5()		MRC(14, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6()		MRC(14, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7()		MRC(14, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0()		MRC(14, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1()		MRC(14, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2()		MRC(14, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3()		MRC(14, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4()		MRC(14, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5()		MRC(14, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6()		MRC(14, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7()		MRC(14, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0()		MRC(14, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1()		MRC(14, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2()		MRC(14, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3()		MRC(14, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4()		MRC(14, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5()		MRC(14, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6()		MRC(14, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7()		MRC(14, 1, c1, c7, 3)
+
+/*
+ * 64-bit registers; ignore the upper 32 bits.
+ * A read from a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits being returned as RES0.
+ */
+#define RSYSL_ETMACATR0()		MRC(14, 1, c2, c0, 2)
+#define RSYSL_ETMACATR1()		MRC(14, 1, c2, c2, 2)
+#define RSYSL_ETMACATR2()		MRC(14, 1, c2, c4, 2)
+#define RSYSL_ETMACATR3()		MRC(14, 1, c2, c6, 2)
+#define RSYSL_ETMACATR4()		MRC(14, 1, c2, c8, 2)
+#define RSYSL_ETMACATR5()		MRC(14, 1, c2, c10, 2)
+#define RSYSL_ETMACATR6()		MRC(14, 1, c2, c12, 2)
+#define RSYSL_ETMACATR7()		MRC(14, 1, c2, c14, 2)
+#define RSYSL_ETMACATR8()		MRC(14, 1, c2, c0, 3)
+#define RSYSL_ETMACATR9()		MRC(14, 1, c2, c2, 3)
+#define RSYSL_ETMACATR10()		MRC(14, 1, c2, c4, 3)
+#define RSYSL_ETMACATR11()		MRC(14, 1, c2, c6, 3)
+#define RSYSL_ETMACATR12()		MRC(14, 1, c2, c8, 3)
+#define RSYSL_ETMACATR13()		MRC(14, 1, c2, c10, 3)
+#define RSYSL_ETMACATR14()		MRC(14, 1, c2, c12, 3)
+#define RSYSL_ETMACATR15()		MRC(14, 1, c2, c14, 3)
+#define RSYSL_ETMCIDCVR0()		MRC(14, 1, c3, c0, 0)
+#define RSYSL_ETMCIDCVR1()		MRC(14, 1, c3, c2, 0)
+#define RSYSL_ETMCIDCVR2()		MRC(14, 1, c3, c4, 0)
+#define RSYSL_ETMCIDCVR3()		MRC(14, 1, c3, c6, 0)
+#define RSYSL_ETMCIDCVR4()		MRC(14, 1, c3, c8, 0)
+#define RSYSL_ETMCIDCVR5()		MRC(14, 1, c3, c10, 0)
+#define RSYSL_ETMCIDCVR6()		MRC(14, 1, c3, c12, 0)
+#define RSYSL_ETMCIDCVR7()		MRC(14, 1, c3, c14, 0)
+#define RSYSL_ETMACVR0()		MRC(14, 1, c2, c0, 0)
+#define RSYSL_ETMACVR1()		MRC(14, 1, c2, c2, 0)
+#define RSYSL_ETMACVR2()		MRC(14, 1, c2, c4, 0)
+#define RSYSL_ETMACVR3()		MRC(14, 1, c2, c6, 0)
+#define RSYSL_ETMACVR4()		MRC(14, 1, c2, c8, 0)
+#define RSYSL_ETMACVR5()		MRC(14, 1, c2, c10, 0)
+#define RSYSL_ETMACVR6()		MRC(14, 1, c2, c12, 0)
+#define RSYSL_ETMACVR7()		MRC(14, 1, c2, c14, 0)
+#define RSYSL_ETMACVR8()		MRC(14, 1, c2, c0, 1)
+#define RSYSL_ETMACVR9()		MRC(14, 1, c2, c2, 1)
+#define RSYSL_ETMACVR10()		MRC(14, 1, c2, c4, 1)
+#define RSYSL_ETMACVR11()		MRC(14, 1, c2, c6, 1)
+#define RSYSL_ETMACVR12()		MRC(14, 1, c2, c8, 1)
+#define RSYSL_ETMACVR13()		MRC(14, 1, c2, c10, 1)
+#define RSYSL_ETMACVR14()		MRC(14, 1, c2, c12, 1)
+#define RSYSL_ETMACVR15()		MRC(14, 1, c2, c14, 1)
+#define RSYSL_ETMVMIDCVR0()		MRC(14, 1, c3, c0, 1)
+#define RSYSL_ETMVMIDCVR1()		MRC(14, 1, c3, c2, 1)
+#define RSYSL_ETMVMIDCVR2()		MRC(14, 1, c3, c4, 1)
+#define RSYSL_ETMVMIDCVR3()		MRC(14, 1, c3, c6, 1)
+#define RSYSL_ETMVMIDCVR4()		MRC(14, 1, c3, c8, 1)
+#define RSYSL_ETMVMIDCVR5()		MRC(14, 1, c3, c10, 1)
+#define RSYSL_ETMVMIDCVR6()		MRC(14, 1, c3, c12, 1)
+#define RSYSL_ETMVMIDCVR7()		MRC(14, 1, c3, c14, 1)
+#define RSYSL_ETMDVCVR0()		MRC(14, 1, c2, c0, 4)
+#define RSYSL_ETMDVCVR1()		MRC(14, 1, c2, c4, 4)
+#define RSYSL_ETMDVCVR2()		MRC(14, 1, c2, c8, 4)
+#define RSYSL_ETMDVCVR3()		MRC(14, 1, c2, c12, 4)
+#define RSYSL_ETMDVCVR4()		MRC(14, 1, c2, c0, 5)
+#define RSYSL_ETMDVCVR5()		MRC(14, 1, c2, c4, 5)
+#define RSYSL_ETMDVCVR6()		MRC(14, 1, c2, c8, 5)
+#define RSYSL_ETMDVCVR7()		MRC(14, 1, c2, c12, 5)
+#define RSYSL_ETMDVCMR0()		MRC(14, 1, c2, c0, 6)
+#define RSYSL_ETMDVCMR1()		MRC(14, 1, c2, c4, 6)
+#define RSYSL_ETMDVCMR2()		MRC(14, 1, c2, c8, 6)
+#define RSYSL_ETMDVCMR3()		MRC(14, 1, c2, c12, 6)
+#define RSYSL_ETMDVCMR4()		MRC(14, 1, c2, c0, 7)
+#define RSYSL_ETMDVCMR5()		MRC(14, 1, c2, c4, 7)
+#define RSYSL_ETMDVCMR6()		MRC(14, 1, c2, c8, 7)
+#define RSYSL_ETMDVCMR7()		MRC(14, 1, c2, c12, 7)
+
+/*
+ * 32- and 64-bit registers.
+ * A write to a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits of the access being ignored.
+ */
+#define WSYS_ETMAUXCTLR(val)		MCR(val, 14, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val)		MCR(val, 14, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val)		MCR(val, 14, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val)		MCR(val, 14, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val)		MCR(val, 14, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val)		MCR(val, 14, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val)		MCR(val, 14, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val)		MCR(val, 14, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val)		MCR(val, 14, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val)		MCR(val, 14, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val)		MCR(val, 14, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val)		MCR(val, 14, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val)		MCR(val, 14, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val)		MCR(val, 14, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val)		MCR(val, 14, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val)		MCR(val, 14, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val)		MCR(val, 14, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val)		MCR(val, 14, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val)		MCR(val, 14, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val)		MCR(val, 14, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val)		MCR(val, 14, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val)		MCR(val, 14, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val)		MCR(val, 14, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val)		MCR(val, 14, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val)		MCR(val, 14, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val)		MCR(val, 14, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val)		MCR(val, 14, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val)		MCR(val, 14, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val)		MCR(val, 14, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val)		MCR(val, 14, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val)		MCR(val, 14, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val)		MCR(val, 14, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val)		MCR(val, 14, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val)		MCR(val, 14, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val)		MCR(val, 14, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val)		MCR(val, 14, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val)		MCR(val, 14, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val)		MCR(val, 14, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val)		MCR(val, 14, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val)		MCR(val, 14, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val)		MCR(val, 14, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val)		MCR(val, 14, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val)		MCR(val, 14, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val)		MCR(val, 14, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val)		MCR(val, 14, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val)		MCR(val, 14, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val)		MCR(val, 14, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val)		MCR(val, 14, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val)		MCR(val, 14, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val)		MCR(val, 14, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val)		MCR(val, 14, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val)		MCR(val, 14, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val)		MCR(val, 14, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val)		MCR(val, 14, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val)		MCR(val, 14, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val)		MCR(val, 14, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val)		MCR(val, 14, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val)		MCR(val, 14, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val)		MCR(val, 14, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val)		MCR(val, 14, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val)		MCR(val, 14, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val)		MCR(val, 14, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val)		MCR(val, 14, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val)		MCR(val, 14, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val)		MCR(val, 14, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val)		MCR(val, 14, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val)		MCR(val, 14, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val)		MCR(val, 14, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val)		MCR(val, 14, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val)		MCR(val, 14, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val)		MCR(val, 14, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val)		MCR(val, 14, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val)		MCR(val, 14, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val)		MCR(val, 14, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val)		MCR(val, 14, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val)		MCR(val, 14, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val)		MCR(val, 14, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val)		MCR(val, 14, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val)		MCR(val, 14, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val)		MCR(val, 14, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val)		MCR(val, 14, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val)		MCR(val, 14, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val)		MCR(val, 14, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val)		MCR(val, 14, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val)		MCR(val, 14, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val)		MCR(val, 14, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val)		MCR(val, 14, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val)		MCR(val, 14, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val)		MCR(val, 14, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val)		MCR(val, 14, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val)		MCR(val, 14, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val)		MCR(val, 14, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val)		MCR(val, 14, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val)		MCR(val, 14, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val)		MCR(val, 14, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val)		MCR(val, 14, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val)		MCR(val, 14, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val)		MCR(val, 14, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val)		MCR(val, 14, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val)		MCR(val, 14, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val)		MCR(val, 14, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val)		MCR(val, 14, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val)		MCR(val, 14, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val)		MCR(val, 14, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val)		MCR(val, 14, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val)		MCR(val, 14, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val)		MCR(val, 14, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val)		MCR(val, 14, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val)		MCR(val, 14, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val)		MCR(val, 14, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val)		MCR(val, 14, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val)		MCR(val, 14, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val)		MCR(val, 14, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val)		MCR(val, 14, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val)		MCR(val, 14, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val)		MCR(val, 14, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val)		MCR(val, 14, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val)		MCR(val, 14, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val)		MCR(val, 14, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val)		MCR(val, 14, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val)		MCR(val, 14, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val)		MCR(val, 14, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val)		MCR(val, 14, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val)		MCR(val, 14, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val)		MCR(val, 14, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val)		MCR(val, 14, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val)		MCR(val, 14, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val)		MCR(val, 14, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val)		MCR(val, 14, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val)		MCR(val, 14, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val)		MCR(val, 14, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val)		MCR(val, 14, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val)		MCR(val, 14, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val)		MCR(val, 14, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val)		MCR(val, 14, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val)		MCR(val, 14, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val)		MCR(val, 14, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val)		MCR(val, 14, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val)		MCR(val, 14, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val)		MCR(val, 14, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val)		MCR(val, 14, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val)		MCR(val, 14, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val)		MCR(val, 14, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val)		MCR(val, 14, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val)		MCR(val, 14, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val)		MCR(val, 14, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val)		MCR(val, 14, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val)		MCR(val, 14, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val)		MCR(val, 14, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val)		MCR(val, 14, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val)		MCR(val, 14, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val)		MCR(val, 14, 1, c1, c7, 3)
+
+#endif
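
For context, a minimal sketch of how a CoreSight ETMv4 driver might use the AArch32 accessors defined in the header above. The helper name and the unlock-then-read sequence are illustrative assumptions, not part of this commit; trc_write() pastes the register name into the WSYS_* MCR wrappers and trc_readl() into the RSYSL_* MRC wrappers.

/* Illustrative only: clear the OS Lock and read an ID register using
 * the etmv4x.h accessors (hypothetical helper; assumed to run on the
 * CPU whose ETM is being programmed).
 */
#include <asm/etmv4x.h>

static uint32_t etmv4x_read_idr0_example(void)
{
	/* Writing 0 to ETMOSLAR clears the OS Lock (expands to WSYS_ETMOSLAR(0)). */
	trc_write(0x0, ETMOSLAR);
	/* Read ETMIDR0 (expands to RSYSL_ETMIDR0()) to discover trace features. */
	return trc_readl(ETMIDR0);
}
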
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368..cc41438 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@
 #endif /* !SMP */
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 #ifndef CONFIG_SMP
 	preempt_disable();
 #endif
@@ -172,17 +162,9 @@
 	preempt_enable();
 #endif
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/arm/include/asm/hardware/debugv8.h b/arch/arm/include/asm/hardware/debugv8.h
new file mode 100644
index 0000000..a8249cd
--- /dev/null
+++ b/arch/arm/include/asm/hardware/debugv8.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_DEBUGV8_H
+#define __ASM_HARDWARE_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg)			RCP14_##reg()
+#define dbg_write(val, reg)		WCP14_##reg(val)
+
+/* MRC14 registers */
+#define MRC14(op1, crn, crm, op2)					\
+({									\
+uint32_t val;								\
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val));	\
+val;									\
+})
+
+/* MCR14 registers */
+#define MCR14(val, op1, crn, crm, op2)					\
+({									\
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGDSAR,
+ * DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR()			MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint()		MRC14(0, c0, c1, 0)
+#define RCP14_DBGDCCINT()		MRC14(0, c0, c2, 0)
+#define RCP14_DBGDTRRXint()		MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR()			MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR()			MRC14(0, c0, c7, 0)
+#define RCP14_DBGDTRRXext()		MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext()		MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext()		MRC14(0, c0, c3, 2)
+#define RCP14_DBGOSECCR()		MRC14(0, c0, c6, 2)
+#define RCP14_DBGBVR0()			MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1()			MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2()			MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3()			MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4()			MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5()			MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6()			MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7()			MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8()			MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9()			MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10()		MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11()		MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12()		MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13()		MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14()		MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15()		MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0()			MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1()			MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2()			MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3()			MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4()			MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5()			MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6()			MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7()			MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8()			MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9()			MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10()		MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11()		MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12()		MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13()		MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14()		MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15()		MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0()			MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1()			MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2()			MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3()			MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4()			MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5()			MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6()			MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7()			MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8()			MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9()			MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10()		MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11()		MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12()		MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13()		MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14()		MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15()		MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0()			MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1()			MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2()			MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3()			MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4()			MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5()			MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6()			MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7()			MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8()			MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9()			MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10()		MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11()		MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12()		MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13()		MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14()		MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15()		MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR()			MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0()		MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1()		MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2()		MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3()		MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4()		MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5()		MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6()		MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7()		MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8()		MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9()		MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10()		MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11()		MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12()		MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13()		MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14()		MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15()		MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR()		MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR()		MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR()		MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR()			MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR()			MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR()			MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL()		MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET()		MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR()		MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS()		MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2()		MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1()		MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID()		MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDCCINT(val)		MCR14(val, 0, c0, c2, 0)
+#define WCP14_DBGDTRTXint(val)		MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val)		MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val)		MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGDTRRXext(val)		MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val)		MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val)		MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGOSECCR(val)		MCR14(val, 0, c0, c6, 2)
+#define WCP14_DBGBVR0(val)		MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val)		MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val)		MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val)		MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val)		MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val)		MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val)		MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val)		MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val)		MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val)		MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val)		MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val)		MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val)		MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val)		MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val)		MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val)		MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val)		MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val)		MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val)		MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val)		MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val)		MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val)		MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val)		MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val)		MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val)		MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val)		MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val)		MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val)		MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val)		MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val)		MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val)		MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val)		MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val)		MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val)		MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val)		MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val)		MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val)		MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val)		MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val)		MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val)		MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val)		MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val)		MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val)		MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val)		MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val)		MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val)		MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val)		MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val)		MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val)		MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val)		MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val)		MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val)		MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val)		MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val)		MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val)		MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val)		MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val)		MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val)		MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val)		MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val)		MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val)		MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val)		MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val)		MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val)		MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val)		MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val)		MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val)		MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val)		MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val)		MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val)		MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val)		MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val)		MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val)		MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val)		MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val)		MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val)		MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val)		MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val)		MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val)		MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val)		MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val)		MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val)		MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val)		MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val)		MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val)		MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val)		MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val)		MCR14(val, 0, c7, c9, 6)
+
+#endif
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index afcaf8b..e40bbc5 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@
 #define ARM_DEBUG_ARCH_V7_MM	4
 #define ARM_DEBUG_ARCH_V7_1	5
 #define ARM_DEBUG_ARCH_V8	6
+#define ARM_DEBUG_ARCH_V8_8	8
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 9fe1043..f4dab20 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -78,6 +78,9 @@
 	/* Interrupt controller */
 	struct vgic_dist	vgic;
 	int max_vcpus;
+
+	/* Mandated version of PSCI */
+	u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index d10e362..7f66b1b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -223,6 +223,22 @@
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index d0295f1..ff65b6d 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -11,8 +11,6 @@
 
 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
 
-extern char vdso_start, vdso_end;
-
 extern unsigned int vdso_total_pages;
 
 #else /* CONFIG_VDSO */
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c..0b8cf31 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -173,6 +173,12 @@
 #define KVM_REG_ARM_VFP_FPINST		0x1009
 #define KVM_REG_ARM_VFP_FPINST2		0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2d1d821..42d3974 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -150,10 +150,15 @@
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
+	if (!c->irq_set_affinity) {
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
+	} else {
+		int r = irq_set_affinity_locked(d, affinity, false);
+
+		if (r)
+			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+					    d->irq, r);
+	}
 
 	return ret;
 }
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 5b6cb33..f85665d 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1225,25 +1225,9 @@
 
 static int armv7_probe_pmu(struct arm_pmu *arm_pmu)
 {
-	int ret;
-	struct armv7_pmu_idle_nb *pmu_idle_nb;
-
-	pmu_idle_nb = devm_kzalloc(&arm_pmu->plat_device->dev,
-				sizeof(*pmu_idle_nb), GFP_KERNEL);
-	if (!pmu_idle_nb)
-		return -ENOMEM;
-
-	ret = smp_call_function_any(&arm_pmu->supported_cpus,
+	return smp_call_function_any(&arm_pmu->supported_cpus,
 				     armv7_read_num_pmnc_events,
 				     &arm_pmu->num_events, 1);
-	if (ret)
-		return ret;
-
-	pmu_idle_nb->cpu_pmu = arm_pmu;
-	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = armv7_pmu_idle_notifier;
-	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
-
-	return 0;
 }
 
 static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1295,6 +1279,16 @@
 	return armv7_probe_pmu(cpu_pmu);
 }
 
+static int armv8_pmuv3_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv8 Cortex-A53";
+	cpu_pmu->map_event	= armv7_a7_map_event;
+	armv7_read_num_pmnc_events(&cpu_pmu->num_events);
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
+}
+
 static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
@@ -2065,6 +2059,7 @@
 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
 	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
 	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
+	{.compatible = "arm,armv8-pmuv3",      .data = armv8_pmuv3_pmu_init},
 	{},
 };
 
@@ -2077,8 +2072,24 @@
 
 static int armv7_pmu_device_probe(struct platform_device *pdev)
 {
-	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
+	int ret;
+	struct armv7_pmu_idle_nb *pmu_idle_nb;
+
+	pmu_idle_nb = devm_kzalloc(&pdev->dev, sizeof(*pmu_idle_nb),
+				    GFP_KERNEL);
+	if (!pmu_idle_nb)
+		return -ENOMEM;
+
+	ret = arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
 				    armv7_pmu_probe_table);
+	if (ret)
+		return ret;
+
+	pmu_idle_nb->cpu_pmu = (struct arm_pmu *) platform_get_drvdata(pdev);
+	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = armv7_pmu_idle_notifier;
+	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+	return 0;
 }
 
 static struct platform_driver armv7_pmu_driver = {
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 29286fb..8ff6674 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -112,6 +112,12 @@
 	return 0;
 }
 
+static bool psci_cpu_can_disable(unsigned int cpu)
+{
+	/* Hotplug of any CPU is supported */
+	return true;
+}
+
 #endif
 
 bool __init psci_smp_available(void)
@@ -126,5 +132,6 @@
 	.cpu_disable		= psci_cpu_disable,
 	.cpu_die		= psci_cpu_die,
 	.cpu_kill		= psci_cpu_kill,
+	.cpu_can_disable	= psci_cpu_can_disable,
 #endif
 };
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1b30489..aa316a7 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -415,7 +416,8 @@
 	raw_spin_unlock_irqrestore(&undef_lock, flags);
 }
 
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
 	struct undef_hook *hook;
 	unsigned long flags;
@@ -488,6 +490,7 @@
 
 	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)
 
 /*
  * Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 53cf86c..8904397 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -39,6 +39,8 @@
 
 static struct page **vdso_text_pagelist;
 
+extern char vdso_start[], vdso_end[];
+
 /* Total number of pages needed for the data and text portions of the VDSO. */
 unsigned int vdso_total_pages __ro_after_init;
 
@@ -179,13 +181,13 @@
 	unsigned int text_pages;
 	int i;
 
-	if (memcmp(&vdso_start, "\177ELF", 4)) {
+	if (memcmp(vdso_start, "\177ELF", 4)) {
 		pr_err("VDSO is not a valid ELF object!\n");
 		return -ENOEXEC;
 	}
 
-	text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+	text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+	pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
 
 	/* Allocate the VDSO text pagelist */
 	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -200,7 +202,7 @@
 	for (i = 0; i < text_pages; i++) {
 		struct page *page;
 
-		page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		page = virt_to_page(vdso_start + i * PAGE_SIZE);
 		vdso_text_pagelist[i] = page;
 	}
 
@@ -211,7 +213,7 @@
 
 	cntvct_ok = cntvct_functional();
 
-	patch_vdso(&vdso_start);
+	patch_vdso(vdso_start);
 
 	return 0;
 }
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 9aca920..630117d 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
@@ -176,6 +177,7 @@
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+		+ kvm_arm_get_fw_num_regs(vcpu)
 		+ NUM_TIMER_REGS;
 }
 
@@ -196,6 +198,11 @@
 		uindices++;
 	}
 
+	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += kvm_arm_get_fw_num_regs(vcpu);
+
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret)
 		return ret;
@@ -214,6 +221,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_get_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
 
@@ -230,6 +240,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_set_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
 
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 3d96225..8a9c654 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -18,6 +18,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/preempt.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/wait.h>
 
 #include <asm/cputype.h>
@@ -425,3 +426,62 @@
 	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return 1;
 }
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+	return 1;		/* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val;
+
+		val = kvm_psci_version(vcpu, vcpu->kvm);
+		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		bool wants_02;
+		u64 val;
+
+		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+		switch (val) {
+		case KVM_ARM_PSCI_0_1:
+			if (wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		case KVM_ARM_PSCI_0_2:
+		case KVM_ARM_PSCI_1_0:
+			if (!wants_02)
+				return -EINVAL;
+			vcpu->kvm->arch.psci_version = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914..746e780 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
 
 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
 
 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)
 
 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -131,6 +139,8 @@
 	ret	lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)
 
 .pushsection __ex_table, "a"
 	.long	1b, __get_user_bad
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 4f5fd4a..034b894 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -1031,17 +1031,17 @@
 		return -ENOMEM;
 	c->dent = d;
 
-	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
 	if (!d) {
 		err = -ENOMEM;
 		goto err_out;
 	}
-	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+	d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
 	if (!d) {
 		err = -ENOMEM;
 		goto err_out;
 	}
-	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+	d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
 	if (!d) {
 		err = -ENOMEM;
 		goto err_out;
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 678d2a3..3202015 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -225,7 +225,7 @@
 	cpu_idle_poll_ctrl(false);
 }
 
-static void omap_pm_finish(void)
+static void omap_pm_wake(void)
 {
 	if (cpu_is_omap34xx())
 		omap_prcm_irq_complete();
@@ -235,7 +235,7 @@
 	.begin		= omap_pm_begin,
 	.end		= omap_pm_end,
 	.enter		= omap_pm_enter,
-	.finish		= omap_pm_finish,
+	.wake		= omap_pm_wake,
 	.valid		= suspend_valid_only_mem,
 };
 
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index b2f2448..a4cab28 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -136,12 +136,6 @@
 	.tick_resume		= omap2_gp_timer_shutdown,
 };
 
-static struct property device_disabled = {
-	.name = "status",
-	.length = sizeof("disabled"),
-	.value = "disabled",
-};
-
 static const struct of_device_id omap_timer_match[] __initconst = {
 	{ .compatible = "ti,omap2420-timer", },
 	{ .compatible = "ti,omap3430-timer", },
@@ -183,8 +177,17 @@
 				  of_get_property(np, "ti,timer-secure", NULL)))
 			continue;
 
-		if (!of_device_is_compatible(np, "ti,omap-counter32k"))
-			of_add_property(np, &device_disabled);
+		if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
+			struct property *prop;
+
+			prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+			if (!prop)
+				return NULL;
+			prop->name = "status";
+			prop->value = "disabled";
+			prop->length = strlen(prop->value);
+			of_add_property(np, prop);
+		}
 		return np;
 	}
 
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 89bb0fc..72905a4 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -57,7 +57,6 @@
 
 config MACH_DNS323
 	bool "D-Link DNS-323"
-	select GENERIC_NET_UTILS
 	select I2C_BOARDINFO if I2C
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -65,7 +64,6 @@
 
 config MACH_TS209
 	bool "QNAP TS-109/TS-209"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-109/TS-209 platform.
@@ -107,7 +105,6 @@
 
 config MACH_TS409
 	bool "QNAP TS-409"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-409 platform.
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index cd483bf..d13344b 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -173,10 +173,42 @@
 	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+ * functions be kept somewhere?
+ */
+static int __init dns323_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init dns323_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = dns323_parse_hex_nibble(b[0]);
+	lo = dns323_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init dns323_read_mac_addr(void)
 {
 	u_int8_t addr[6];
-	void __iomem *mac_page;
+	int i;
+	char *mac_page;
 
 	/* MAC address is stored as a regular ol' string in /dev/mtdblock4
 	 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
@@ -185,8 +217,23 @@
 	if (!mac_page)
 		return -ENOMEM;
 
-	if (!mac_pton((__force const char *) mac_page, addr))
-		goto error_fail;
+	/* Sanity check the string we're looking at */
+	for (i = 0; i < 5; i++) {
+		if (*(mac_page + (i * 3) + 2) != ':') {
+			goto error_fail;
+		}
+	}
+
+	for (i = 0; i < 6; i++)	{
+		int byte;
+
+		byte = dns323_parse_hex_byte(mac_page + (i * 3));
+		if (byte < 0) {
+			goto error_fail;
+		}
+
+		addr[i] = byte;
+	}
 
 	iounmap(mac_page);
 	printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 8977498..905d4f2 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -53,12 +53,53 @@
 	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
+static int __init qnap_tsx09_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init qnap_tsx09_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = qnap_tsx09_parse_hex_nibble(b[0]);
+	lo = qnap_tsx09_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
 {
 	u_int8_t addr[6];
+	int i;
 
-	if (!mac_pton(addr_str, addr))
-		return -1;
+	for (i = 0; i < 6; i++) {
+		int byte;
+
+		/*
+		 * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+		 */
+		if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+			return -1;
+
+		byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+		if (byte < 0)
+			return -1;
+		addr[i] = byte;
+	}
 
 	printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 
@@ -77,12 +118,12 @@
 	unsigned long addr;
 
 	for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
-		void __iomem *nor_page;
+		char *nor_page;
 		int ret = 0;
 
 		nor_page = ioremap(addr, 1024);
 		if (nor_page != NULL) {
-			ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+			ret = qnap_tsx09_check_mac_addr(nor_page);
 			iounmap(nor_page);
 		}
 
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index b055b60..da48623 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -94,7 +94,7 @@
 	select MAY_HAVE_SPARSE_IRQ
 	select PINCTRL_MSM_TLMM
 	select USE_PINCTRL_IRQ
-	select MSM_PM if PM
+	select MSM_PM_LEGACY if PM
 	select MSM_RPM_SMD
 	select MSM_RPM_STATS_LOG
 	select MSM_RPM_LOG
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 5b93fa3..2f26f39 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_USE_OF) += board-dt.o
 obj-$(CONFIG_SMP)	+= platsmp.o
+obj-$(CONFIG_MSM_PM_LEGACY)	+= hotplug.o
 obj-$(CONFIG_ARCH_SDXPOORWILLS) += board-poorwills.o
 obj-$(CONFIG_ARCH_MSM8953) += board-msm8953.o
 obj-$(CONFIG_ARCH_MSM8937) += board-msm8937.o
diff --git a/arch/arm/mach-qcom/board-msm8917.c b/arch/arm/mach-qcom/board-msm8917.c
index 63bc43b..0bd6984 100644
--- a/arch/arm/mach-qcom/board-msm8917.c
+++ b/arch/arm/mach-qcom/board-msm8917.c
@@ -17,6 +17,7 @@
 
 static const char *msm8917_dt_match[] __initconst = {
 	"qcom,msm8917",
+	"qcom,apq8017",
 	NULL
 };
 
diff --git a/arch/arm/mach-qcom/board-msm8953.c b/arch/arm/mach-qcom/board-msm8953.c
index de4538f..9a82e3a 100644
--- a/arch/arm/mach-qcom/board-msm8953.c
+++ b/arch/arm/mach-qcom/board-msm8953.c
@@ -37,8 +37,8 @@
 	/* Explicitly parent the /soc devices to the root node to preserve
 	 * the kernel ABI (sysfs structure, etc) until userspace is updated
 	 */
-	of_platform_populate(of_find_node_by_path("/soc"),
-			     of_default_bus_match_table, NULL, NULL);
+	return of_platform_populate(of_find_node_by_path("/soc"),
+				of_default_bus_match_table, NULL, NULL);
 }
 late_initcall(msm8953_dt_populate);
 #endif
diff --git a/arch/arm/mach-qcom/hotplug.c b/arch/arm/mach-qcom/hotplug.c
new file mode 100644
index 0000000..c038f4b
--- /dev/null
+++ b/arch/arm/mach-qcom/hotplug.c
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *  Copyright (c) 2011-2014, 2016, 2018, The Linux Foundation.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <asm/smp_plat.h>
+#include "platsmp.h"
+#include <soc/qcom/jtag.h>
+
+static cpumask_t cpu_dying_mask;
+static DEFINE_PER_CPU(unsigned int, warm_boot_flag);
+
+static inline void cpu_enter_lowpower(void)
+{
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+}
+
+static inline void platform_do_lowpower(unsigned int cpu)
+{
+	lpm_cpu_hotplug_enter(cpu);
+	/*
+	 * getting here means that we have come out of low power mode
+	 * without having been woken up - this shouldn't happen
+	 *
+	 */
+	pr_err("%s: CPU%u has failed to Hotplug\n", __func__, cpu);
+}
+
+int qcom_cpu_kill_legacy(unsigned int cpu)
+{
+	int ret = 0;
+
+	if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
+		ret = msm_pm_wait_cpu_shutdown(cpu);
+
+	return ret ? 0 : 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref qcom_cpu_die_legacy(unsigned int cpu)
+{
+	if (unlikely(cpu != smp_processor_id())) {
+		pr_crit("%s: running on %u, should be %u\n",
+			__func__, smp_processor_id(), cpu);
+		WARN_ON(cpu);
+	}
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();
+	platform_do_lowpower(cpu);
+
+	pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
+	cpu_leave_lowpower();
+}
+
+int msm_platform_secondary_init(unsigned int cpu)
+{
+	int ret;
+	unsigned int *warm_boot = this_cpu_ptr(&warm_boot_flag);
+
+	if (!(*warm_boot)) {
+		*warm_boot = 1;
+		/*
+		 * All CPU0 boots are considered warm boots (restore needed)
+		 * since CPU0 is the system boot CPU and never cold-booted
+		 * by the kernel.
+		 */
+		if (cpu)
+			return 0;
+	}
+	msm_jtag_restore_state();
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+
+	return ret;
+}
+
+static int hotplug_dying_cpu(unsigned int cpu)
+{
+	cpumask_set_cpu(cpu, &cpu_dying_mask);
+	return 0;
+}
+
+static int __init init_hotplug_dying(void)
+{
+	cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+		 "AP_QCOM_HOTPLUG_STARTING", NULL, hotplug_dying_cpu);
+
+	return 0;
+}
+early_initcall(init_hotplug_dying);
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index c422ac3..803804d 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -22,6 +22,9 @@
 #include <asm/smp_plat.h>
 #include <asm/fixmap.h>
 #include "platsmp.h"
+#ifdef CONFIG_MSM_PM_LEGACY
+#include <soc/qcom/pm-legacy.h>
+#endif
 
 #define MSM_APCS_IDR 0x0B011030
 
@@ -62,10 +65,18 @@
 {
 	wfi();
 }
+
+static bool qcom_cpu_can_disable(unsigned int cpu)
+{
+	return true; /* Hotplug of any CPU is supported */
+}
 #endif
 
 static void qcom_secondary_init(unsigned int cpu)
 {
+#ifdef CONFIG_MSM_PM_LEGACY
+	WARN_ON(msm_platform_secondary_init(cpu));
+#endif
 	/*
 	 * Synchronise with the boot thread.
 	 */
@@ -472,8 +483,14 @@
 	.smp_secondary_init = qcom_secondary_init,
 	.smp_boot_secondary = msm8909_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_MSM_PM_LEGACY
+	.cpu_die		= qcom_cpu_die_legacy,
+	.cpu_kill		= qcom_cpu_kill_legacy,
+#else
 	.cpu_die		= qcom_cpu_die,
 #endif
+	.cpu_can_disable	= qcom_cpu_can_disable,
+#endif
 };
 
 CPU_METHOD_OF_DECLARE(qcom_smp_8909, "qcom,apss-8909", &msm8909_smp_ops);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b57aafc..f216025 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2372,6 +2372,7 @@
 
 	mapping->nr_bitmaps = 1;
 	mapping->extensions = extensions;
+	mapping->bits = BITS_PER_BYTE * bitmap_size;
 
 	spin_lock_init(&mapping->lock);
 	mapping->ops = &iommu_ops;
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 7a327bd..ebef8aa 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -890,11 +890,8 @@
 	timer->irq = irq->start;
 	timer->pdev = pdev;
 
-	/* Skip pm_runtime_enable for OMAP1 */
-	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
-		pm_runtime_enable(dev);
-		pm_runtime_irq_safe(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
 
 	if (!timer->reserved) {
 		ret = pm_runtime_get_sync(dev);
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index bcdecc2..b2aa9b3 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@
 {
 	unsigned long flags;
 	struct kprobe *p = &op->kp;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
 
 	/* Save skipped registers */
 	regs->ARM_pc = (unsigned long)op->kp.addr;
 	regs->ARM_ORIG_r0 = ~0UL;
 
 	local_irq_save(flags);
+	kcb = get_kprobe_ctlblk();
 
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@
 
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9c01a31..80f39e9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -15,7 +15,24 @@
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_INLINE_READ_LOCK if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_SUPPORTS_LTO_CLANG
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING
diff --git a/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi b/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi
index 3247d0d..b073d99 100644
--- a/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/8909-pm8916.dtsi
@@ -185,16 +185,6 @@
 	qcom,mdss_dsi_pll@1ac8300 {
 		vddio-supply = <&pm8916_l6>;
 	};
-
-	qcom,msm-thermal {
-		vdd-dig-supply = <&pm8916_s1_floor_corner>;
-
-		qcom,vdd-apps-rstr {
-			qcom,vdd-rstr-reg = "vdd-apps";
-			qcom,levels = <800000>;
-			qcom,freq-req;
-		};
-	};
 };
 
 
@@ -296,6 +286,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@32 {
@@ -307,6 +298,7 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@3c {
@@ -318,6 +310,7 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 };
 
@@ -350,3 +343,86 @@
 
 #include "msm8909-pm8916-pm.dtsi"
 
+&soc {
+	thermal-zones {
+		xo-therm-buf-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			thermal-sensors = <&pm8916_vadc 0x3c>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		xo-therm-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			thermal-sensors = <&pm8916_vadc 0x32>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		pa-therm0-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			thermal-sensors = <&pm8916_vadc 0x36>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+		mdm-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		camera-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		gpu-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		cpu0-2-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+		cpu1-3-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 98238d9..3ad1c0e 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -135,17 +135,23 @@
 		sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo \
 		sda670-cdp-overlay.dtbo \
 		sda670-mtp-overlay.dtbo \
+		sda670-hdk-overlay.dtbo \
 		sda670-pm660a-cdp-overlay.dtbo \
 		sda670-pm660a-mtp-overlay.dtbo \
 		sdm670-tasha-codec-cdp-overlay.dtbo \
 		sdm670-pm660a-tasha-codec-cdp-overlay.dtbo \
 		sdm670-aqt1000-cdp-overlay.dtbo \
 		sdm670-pm660a-aqt1000-cdp-overlay.dtbo \
+		sxr1120-lc-mtp-overlay.dtbo \
+		sxr1120-lc-external-codec-mtp-overlay.dtbo \
+		sxr1120-lc-cdp-overlay.dtbo \
+		sxr1120-lc-external-codec-cdp-overlay.dtbo \
 		qcs605-cdp-overlay.dtbo \
 		qcs605-mtp-overlay.dtbo \
 		qcs605-360camera-overlay.dtbo \
 		qcs605-external-codec-mtp-overlay.dtbo \
 		qcs605-lc-mtp-overlay.dtbo \
+		qcs605-lc-cdp-overlay.dtbo \
 		sdm710-cdp-overlay.dtbo \
 		sdm710-mtp-overlay.dtbo \
 		sdm710-qrd-overlay.dtbo \
@@ -194,13 +200,19 @@
 sdm670-pm660a-aqt1000-cdp-overlay.dtbo-base := sdm670.dtb
 sda670-cdp-overlay.dtbo-base := sda670.dtb
 sda670-mtp-overlay.dtbo-base := sda670.dtb
+sda670-hdk-overlay.dtbo-base := sda670.dtb
 sda670-pm660a-cdp-overlay.dtbo-base := sda670.dtb
 sda670-pm660a-mtp-overlay.dtbo-base := sda670.dtb
+sxr1120-lc-mtp-overlay.dtbo-base := sxr1120-lc.dtb
+sxr1120-lc-external-codec-mtp-overlay.dtbo-base := sxr1120-lc.dtb
+sxr1120-lc-cdp-overlay.dtbo-base := sxr1120-lc.dtb
+sxr1120-lc-external-codec-cdp-overlay.dtbo-base := sxr1120-lc.dtb
 qcs605-cdp-overlay.dtbo-base := qcs605.dtb
 qcs605-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-external-codec-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-lc-mtp-overlay.dtbo-base := qcs605-lc.dtb
 qcs605-360camera-overlay.dtbo-base := qcs605.dtb
+qcs605-lc-cdp-overlay.dtbo-base := qcs605-lc-cdp-base.dtb
 sdm710-cdp-overlay.dtbo-base := sdm710.dtb
 sdm710-mtp-overlay.dtbo-base := sdm710.dtb
 sdm710-qrd-overlay.dtbo-base := sdm710.dtb
@@ -245,16 +257,22 @@
 	sdm670-usbc-pm660a-cdp.dtb \
 	sdm670-usbc-pm660a-mtp.dtb \
 	sda670-mtp.dtb \
+	sda670-hdk.dtb \
 	sda670-cdp.dtb \
 	sdm670-tasha-codec-cdp.dtb \
 	sdm670-pm660a-tasha-codec-cdp.dtb \
 	sda670-pm660a-mtp.dtb \
 	sda670-pm660a-cdp.dtb \
+	sxr1120-lc-mtp.dtb \
+	sxr1120-lc-external-codec-mtp.dtb \
+	sxr1120-lc-cdp.dtb \
+	sxr1120-lc-external-codec-cdp.dtb \
 	qcs605-360camera.dtb \
 	qcs605-mtp.dtb \
 	qcs605-cdp.dtb \
 	qcs605-external-codec-mtp.dtb \
 	qcs605-lc-mtp.dtb \
+	qcs605-lc-cdp.dtb \
 	sdm710-mtp.dtb \
 	sdm710-cdp.dtb \
 	sdm710-qrd.dtb \
@@ -309,7 +327,8 @@
 dtbo-$(CONFIG_ARCH_SDM439) += sdm439-mtp-overlay.dtbo \
 	sdm439-cdp-overlay.dtbo \
 	sdm439-qrd-overlay.dtbo \
-	sdm439-external-codec-mtp-overlay.dtbo
+	sdm439-external-codec-mtp-overlay.dtbo \
+	sdm439-rcm-overlay.dtbo
 
 dtbo-$(CONFIG_ARCH_SDM429) += sdm429-mtp-overlay.dtbo \
 	sdm429-cdp-overlay.dtbo \
@@ -377,6 +396,7 @@
 sdm439-qrd-overlay.dtbo-base := sdm439.dtb \
 	msm8937-interposer-sdm439.dtb
 sdm439-external-codec-mtp-overlay.dtbo-base := sdm439.dtb
+sdm439-rcm-overlay.dtbo-base := sdm439.dtb
 sdm429-mtp-overlay.dtbo-base := sdm429.dtb \
 	sda429.dtb \
 	msm8937-interposer-sdm429.dtb
@@ -404,10 +424,10 @@
 	apq8053-iot-mtp.dtb \
 	apq8053-lite-dragon-v1.0.dtb \
 	apq8053-lite-dragon-v2.0.dtb \
-	apq8053-lite-lenovo-v1.0.dtb \
-	apq8053-lite-lenovo-v1.1.dtb \
-	apq8053-lite-harman-v1.0.dtb \
-	apq8053-lite-lge-v1.0.dtb \
+	apq8053-lite-dragon-v2.1.dtb \
+	apq8053-lite-dragon-v2.2.dtb \
+	apq8053-lite-dragon-v2.3.dtb \
+	apq8053-lite-dragon-v2.4.dtb \
 	msm8953-pmi8940-cdp.dtb \
 	msm8953-pmi8940-mtp.dtb \
 	msm8953-pmi8937-cdp.dtb \
@@ -447,7 +467,8 @@
 	apq8009w-bg-alpha.dtb \
 	apq8009-mtp-wcd9326-refboard.dtb \
 	apq8009-robot-som-refboard.dtb \
-	apq8009-dragon.dtb
+	apq8009-dragon.dtb \
+	apq8009-lat-v1.0.dtb
 
 dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
 	sdm450-cdp.dtb \
@@ -485,7 +506,8 @@
 	sdm439-qrd.dtb \
 	sda439-mtp.dtb \
 	sda439-cdp.dtb \
-	sdm439-external-codec-mtp.dtb
+	sdm439-external-codec-mtp.dtb \
+	sdm439-rcm.dtb
 
 dtb-$(CONFIG_ARCH_SDM429) += sdm429-mtp.dtb \
 	sdm429-cdp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi b/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi
index d28e139..9261dfe 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8009-audio-external_codec.dtsi
@@ -19,6 +19,12 @@
 };
 
 &soc {
+	qcom,msm-audio-apr {
+		compatible = "qcom,msm-audio-apr";
+		msm_audio_apr_dummy {
+			compatible = "qcom,msm-audio-apr-dummy";
+		};
+	};
 	sound-9335 {
 		compatible = "qcom,apq8009-audio-i2s-codec";
 		qcom,model = "apq8009-tashalite-snd-card";
@@ -234,16 +240,17 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36864>;
 		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		qcom,msm-cpudai-tdm-sec-port-enable;
 		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
 		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36864>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -254,17 +261,26 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36865>;
 		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		qcom,msm-cpudai-tdm-sec-port-enable;
 		qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>;
 		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36865>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
 };
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009-dragon.dts b/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
index 12a4363..ba12854 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
@@ -71,10 +71,88 @@
 		compatible = "qca,qca9379";
 		qca,bt-reset-gpio = <&msm_gpio 47 0>; /* BT_EN */
 	};
+
+	cnss_sdio: qcom,cnss_sdio {
+		compatible = "qcom,cnss_sdio";
+		subsys-name = "AR6320";
+		/**
+		 * There is no vdd-wlan on board and this is not for DSRC.
+		 * IO and XTAL share the same vreg.
+		 **/
+		vdd-wlan-io-supply = <&pm8916_l5>;
+		qcom,cap-tsf-gpio = <&msm_gpio 42 1>;
+		qcom,wlan-ramdump-dynamic = <0x200000>;
+		qcom,msm-bus,name = "msm-cnss";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<79 512 0 0>,             /* No vote */
+				<79 512 6250 200000>,     /* 50 Mbps */
+				<79 512 25000 200000>,    /* 200 Mbps */
+				<79 512 2048000 4096000>; /* MAX */
+	};
+};
+
+&wcnss {
+	status = "disabled";
+};
+
+&msm_gpio {
+	sdc2_wlan_gpio_on: sdc2_wlan_gpio_on {
+		mux {
+			pins = "gpio43";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio43";
+			drive-strength = <10>;
+			bias-pull-up;
+			output-high;
+		};
+	};
+
+	sdc2_wlan_gpio_off: sdc2_wlan_gpio_off {
+		mux {
+			pins = "gpio43";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio43";
+			drive-strength = <2>;
+			bias-disable;
+			output-low;
+		};
+	};
 };
 
 &sdhc_2 {
-	status = "disabled";
+	/delete-property/cd-gpios;
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 221 0
+			2 &msm_gpio 40 0x1>;
+	interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
+
+	qcom,vdd-voltage-level = <1800000 2950000>;
+	qcom,vdd-current-level = <15000 400000>;
+
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 50000>;
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+			&sdc2_wlan_gpio_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+			&sdc2_wlan_gpio_off>;
+	qcom,nonremovable;
+	qcom,core_3_0v_support;
+	status = "ok";
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts
new file mode 100644
index 0000000..f81e369
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8909-mtp.dtsi"
+#include "8909-pm8916.dtsi"
+#include "msm8909-pm8916-mtp.dtsi"
+#include "apq8009-audio-external_codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. APQ8009-lat-v1.0 Board";
+	compatible = "qcom,apq8009-mtp", "qcom,apq8009", "qcom,mtp";
+	qcom,msm-id = <265 2>;
+	qcom,board-id= <10 0x1>;
+
+	bluetooth: bt_qca9379 {
+		compatible = "qca,qca9379";
+		qca,bt-reset-gpio = <&msm_gpio 47 0x0>; /* BT_EN */
+	};
+};
+
+&soc {
+	ext-codec {
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,audio-routing =
+			"AIF4 VI", "MCLK",
+			"RX_BIAS", "MCLK",
+			"MADINPUT", "MCLK",
+			"AMIC2", "MIC BIAS2",
+			"MIC BIAS2", "Headset Mic",
+			"DMIC0", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic0",
+			"DMIC1", "MIC BIAS1",
+			"MIC BIAS1", "Digital Mic1",
+			"DMIC2", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic2",
+			"DMIC3", "MIC BIAS3",
+			"MIC BIAS3", "Digital Mic3",
+			"SpkrLeft IN", "SPK1 OUT",
+			"SpkrRight IN", "SPK2 OUT";
+	};
+
+	sound-9335 {
+		status = "disabled";
+	};
+
+	i2c@78b8000 {
+		wcd9xxx_codec@d {
+			status = "disabled";
+		};
+	};
+
+	vph_pwr_vreg: vph_pwr_vreg {
+		compatible = "regulator-fixed";
+		status = "ok";
+		regulator-name = "vph_pwr";
+		regulator-always-on;
+	};
+
+	mdss_mdp: qcom,mdss_mdp@1a00000 {
+		status = "disabled";
+	};
+
+	qcom,msm-thermal {
+		qcom,core-control-mask = <0xa>;
+		qcom,freq-mitigation-value = <1190400>;
+		qcom,freq-mitigation-control-mask = <0x05>;
+	};
+};
+
+&sdhc_2 {
+	status = "disabled";
+};
+
+&usb_otg {
+	interrupts = <0 134 0>, <0 140 0>, <0 136 0>;
+	interrupt-names = "core_irq", "async_irq", "phy_irq";
+	qcom,hsusb-otg-mode = <3>;
+	qcom,phy-id-high-as-peripheral;
+	vbus_otg-supply = <&vph_pwr_vreg>;
+};
+
+&external_image_mem {
+	reg = <0x0 0x87a00000 0x0 0x0600000>;
+};
+
+&modem_adsp_mem {
+	reg = <0x0 0x88000000 0x0 0x01e00000>;
+};
+
+&peripheral_mem {
+	reg = <0x0 0x89e00000 0x0 0x0700000>;
+};
+
+&i2c_4 {
+	smb1360_otg_supply: smb1360-chg-fg@14 {
+		compatible = "qcom,smb1360-chg-fg";
+		reg = <0x14>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <58 8>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&smb_int_default>;
+		qcom,charging-disabled;
+		qcom,empty-soc-disabled;
+		qcom,chg-inhibit-disabled;
+		qcom,float-voltage-mv = <4200>;
+		qcom,iterm-ma = <200>;
+		qcom,recharge-thresh-mv = <100>;
+		qcom,thermal-mitigation = <1500 700 600 0>;
+		regulator-name = "smb1360_otg_vreg";
+		status= "disabled";
+	};
+};
+
+&firmware {
+	android {
+		compatible = "android,firmware";
+		fstab {
+			compatible = "android,fstab";
+			vendor_fstab: vendor {
+				fsmgr_flags = "wait,slotselect";
+			};
+			/delete-node/ system;
+		};
+	};
+};
+
+&pm8916_chg {
+	status = "ok";
+};
+
+&pm8916_bms {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
index a0a9c54..2afd5ac 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
@@ -18,6 +18,8 @@
 #include "apq8009-audio-external_codec.dtsi"
 #include "apq8009-memory.dtsi"
 #include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+#include "msm8909-pm8916-camera.dtsi"
+#include "msm8909-pm8916-camera-sensor-robot.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8009 WCD9326 Reference Board";
@@ -93,7 +95,7 @@
 };
 
 &soc {
-	sound-9335 {
+	ext_codec: sound-9335 {
 		qcom,audio-routing =
 			"AIF4 VI", "MCLK",
 			"RX_BIAS", "MCLK",
@@ -110,12 +112,22 @@
 			"MIC BIAS3", "Digital Mic3",
 			"SpkrLeft IN", "SPK1 OUT",
 			"SpkrRight IN", "SPK2 OUT";
-	};
 
-	i2c@78b8000 {
-		wcd9xxx_codec@d {
-			qcom,cdc-reset-gpio = <&msm_gpio 27 0>;
-		};
+		qcom,msm-gpios =
+			"us_eu_gpio";
+		qcom,pinctrl-names =
+			"all_off",
+			"us_eu_gpio_act";
+		pinctrl-names =
+			"all_off",
+			"us_eu_gpio_act";
+		pinctrl-0 = <&cross_conn_det_sus>;
+		pinctrl-1 = <&cross_conn_det_act>;
+		qcom,pri-mi2s-gpios = <&cdc_pri_mi2s_gpios>;
+		qcom,quat-mi2s-gpios = <&cdc_quat_mi2s_gpios>;
+
+		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+					"SpkrLeft", "SpkrRight";
 	};
 
 	i2c@78b9000 {
@@ -203,6 +215,36 @@
 		qcom,id-det-gpio = <&msm_gpio 110 0>;
 		qcom,dpdm_switch_gpio = <&pm8916_gpios 3 0>;
 	};
+
+	i2c@78b8000 {
+		wcd9xxx_codec@d {
+			status = "okay";
+			qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+		};
+	};
+
+	cdc_pri_mi2s_gpios: msm_cdc_pinctrl_pri {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&pri_mi2s_active &pri_mi2s_ws_active
+		     &pri_mi2s_dout_active &pri_mi2s_din_active>;
+		pinctrl-1 = <&pri_mi2s_sleep &pri_mi2s_ws_sleep
+		     &pri_mi2s_dout_sleep &pri_mi2s_din_sleep>;
+	};
+
+	cdc_quat_mi2s_gpios: msm_cdc_pinctrl_quat {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
+		pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
+	};
+
+	wcd_rst_gpio: wcd_gpio_ctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_reset_active>;
+		pinctrl-1 = <&cdc_reset_sleep>;
+	};
 };
 
 &wcnss {
@@ -325,4 +367,16 @@
 	status = "disabled";
 };
 
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	status = "okay";
+};
+
+&blsp1_uart2_hs {
+	status = "disabled";
+};
+
 /delete-node/ &cont_splash_mem;
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
index 958f7c8..ec5715d 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
@@ -92,6 +92,28 @@
 	};
 };
 
+&i2c_1 {
+	status = "okay";
+	icm20602@68 {
+		compatible = "invensense,icm20602";
+		reg = <0x68>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <12 0>;
+		invensense,icm20602-gpio = <&msm_gpio 12 0x0>;
+		vdd-ldo-supply = <&pm8916_l6>;
+		interrupt-names = "icm20602_irq";
+		pinctrl-names = "imu_active","imu_suspend";
+		pinctrl-0 = <&imu_int_active>;
+		pinctrl-1 = <&imu_int_suspend>;
+		status = "ok";
+	};
+	vl53l0x@29 {
+		compatible = "st,stmvl53l0";
+		reg = <0x29>;
+		status = "ok";
+	};
+};
+
 &wcnss {
 	status = "disabled";
 };
@@ -164,7 +186,7 @@
 };
 
 &external_image_mem {
-	reg = <0x0 0x87a00000 0x0 0x0600000>;
+	reg = <0x0 0x87900000 0x0 0x0700000>;
 };
 
 &modem_adsp_mem {
@@ -172,7 +194,7 @@
 };
 
 &peripheral_mem {
-	reg = <0x0 0x89e00000 0x0 0x0700000>;
+	status = "disabled";
 };
 
 &pm8916_chg {
@@ -186,3 +208,12 @@
 &blsp1_uart2_hs {
 	status = "ok";
 };
+
+&i2c_1 {
+	status = "okay";
+	vl53l0x@52 {
+		compatible = "st,stmvl53l0";
+		reg = <0x29>;
+		status = "ok";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts b/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
index 1fe7b15..20878c0 100644
--- a/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
@@ -52,6 +52,7 @@
 	qcom,blackghost {
 		compatible = "qcom,pil-blackghost";
 
+		qcom,pil-force-shutdown;
 		qcom,firmware-name = "bg-wear";
 		/* GPIO inputs from blackghost */
 		qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -157,8 +158,11 @@
 		interrupts = <50 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active","nfc_suspend";
-		pinctrl-0 = <&nfcw_int_active &nfcw_disable_active>;
+		pinctrl-0 = <&nfcw_int_active
+			     &nfcw_disable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk3_pin>;
 		clock-names = "ref_clk";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts b/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
index 8113670..e7af39f 100644
--- a/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
@@ -71,6 +71,7 @@
 	qcom,blackghost {
 		compatible = "qcom,pil-blackghost";
 
+		qcom,pil-force-shutdown;
 		qcom,firmware-name = "bg-wear";
 		/* GPIO inputs from blackghost */
 		qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -176,8 +177,11 @@
 		interrupts = <50 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active","nfc_suspend";
-		pinctrl-0 = <&nfcw_int_active &nfcw_disable_active>;
+		pinctrl-0 = <&nfcw_int_active
+			     &nfcw_disable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk3_pin>;
 		clock-names = "ref_clk";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
index 9f0edda..8782001 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
@@ -86,3 +86,12 @@
 	qcom,model = "msm8953-tasha-snd-card";
 	status = "okay";
 };
+
+&soc {
+	usb_detect: usb_detect {
+		compatible = "linux,extcon-usb-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&ssusb_mode_sel>;
+		id-gpio = <&tlmm 12 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
index 325accf..6c9c266 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.1.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.0 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.1";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01010020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
similarity index 97%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
index 4d9c40c..993799b 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
@@ -14,7 +14,7 @@
 #include "apq8053-lite-dragon.dtsi"
 
 &mdss_dsi0 {
-	qcom,ext_vdd-gpio = <&tlmm 100 0>;
+	qcom,ext-vdd-gpio = <&tlmm 100 0>;
 	qcom,platform-bklight-en-gpio = <&tlmm 95 0>;
 
 	qcom,platform-lane-config = [00 00 ff 0f
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
index 0c7b557..ecc4fea 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lenovo-v1.1.dtsi"
+#include "apq8053-lite-dragon-v2.2.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.1 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.2";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01010120 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
similarity index 70%
copy from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
copy to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
index 4d9c40c..1744c90 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
@@ -13,8 +13,33 @@
 
 #include "apq8053-lite-dragon.dtsi"
 
+&i2c_3 {
+	status = "okay";
+	/delete-node/ himax_ts@48;
+};
+
+&mdss_mdp {
+	qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
 &mdss_dsi0 {
-	qcom,ext_vdd-gpio = <&tlmm 100 0>;
+	qcom,dsi-pref-prim-pan = <&dsi_boent51021_1200p_video>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	vdd-supply = <&pm8953_l10>;
+	vddio-supply = <&pm8953_l6>;
+	lab-supply = <&lab_regulator>;
+	ibb-supply = <&ibb_regulator>;
+
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 61 0>;
+	qcom,ext-vdd-gpio = <&tlmm 100 0>;
 	qcom,platform-bklight-en-gpio = <&tlmm 95 0>;
 
 	qcom,platform-lane-config = [00 00 ff 0f
@@ -24,6 +49,10 @@
 				00 00 ff 8f];
 };
 
+&mdss_dsi1 {
+	status = "disabled";
+};
+
 &eeprom0 {
 	gpios = <&tlmm 26 0>,
 		<&tlmm 40 0>,
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
index 203b6b8..e3f80be 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-harman-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.3.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite Harman v1.0 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.3";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01020020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
index 70952dc..1f40ef8 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
@@ -13,10 +13,10 @@
 
 /dts-v1/;
 
-#include "apq8053-lite-lge-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.4.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. APQ8053 Lite LGE v1.0 Board";
+	model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.4";
 	compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
 			"qcom,dragonboard";
 	qcom,board-id= <0x01030020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi
deleted file mode 100644
index 396fd55..0000000
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "apq8053-lite-dragon.dtsi"
-
-&i2c_3 {
-	status = "okay";
-	/delete-node/ himax_ts@48;
-};
-
-&eeprom0 {
-	gpios = <&tlmm 26 0>,
-		<&tlmm 40 0>,
-		<&tlmm 118 0>,
-		<&tlmm 119 0>,
-		<&tlmm 39 0>;
-	qcom,gpio-vdig = <3>;
-	qcom,gpio-vana = <4>;
-	qcom,gpio-req-tbl-num = <0 1 2 3 4>;
-	qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
-	qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
-			"CAM_RESET0",
-			"CAM_VDIG",
-			"CAM_VANA",
-			"CAM_STANDBY0";
-};
-
-&camera0 {
-	qcom,mount-angle = <270>;
-	gpios = <&tlmm 26 0>,
-		<&tlmm 40 0>,
-		<&tlmm 39 0>,
-		<&tlmm 118 0>,
-		<&tlmm 119 0>;
-	qcom,gpio-vdig = <3>;
-	qcom,gpio-vana = <4>;
-	qcom,gpio-req-tbl-num = <0 1 2 3 4>;
-	qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
-	qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
-			"CAM_RESET0",
-			"CAM_STANDBY0",
-			"CAM_VDIG",
-			"CAM_VANA";
-};
-
-&camera1 {
-	qcom,mount-angle = <270>;
-};
-
-&camera2{
-	qcom,mount-angle = <270>;
-};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
index b4ac287..06fc5a4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
@@ -70,6 +70,10 @@
 			29 01 00 00 00 00 05 2a 00 04 01 89
 			/* Reset row start address */
 			29 01 00 00 00 00 05 2b 00 00 01 85
+			15 01 00 00 00 00 02 fe 01
+			15 01 00 00 00 00 02 04 00
+			15 01 00 00 00 00 02 fe 00
+			15 01 00 00 00 00 02 3a 77
 			];
 		qcom,mdss-dsi-traffic-mode = "burst_mode";
 		qcom,mdss-dsi-lane-map = "lane_map_0123";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-boent51021-1200p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-boent51021-1200p-video.dtsi
new file mode 100644
index 0000000..04accd8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-boent51021-1200p-video.dtsi
@@ -0,0 +1,92 @@
+/* Novatek Android Driver Sample Code for Novatek chipset
+ *
+ * Copyright (C) 2015-2018 Novatek Microelectronics Corp.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+&mdss_mdp {
+	dsi_boent51021_1200p_video: qcom,mdss_dsi_boent51021_1200p_video {
+		qcom,mdss-dsi-panel-name =
+			"boent51021 1200p video mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1200>;
+		qcom,mdss-dsi-panel-height = <1920>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <1>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <14>;
+		qcom,mdss-dsi-v-front-porch = <25>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			23 01 00 00 01 00 02 8f a5
+			23 01 00 00 00 00 02 83 00
+			23 01 00 00 00 00 02 84 00
+			23 01 00 00 00 00 02 8c 80
+			23 01 00 00 00 00 02 cd 6c
+			23 01 00 00 00 00 02 c8 fc
+			23 01 00 00 00 00 02 97 00
+			23 01 00 00 00 00 02 8b 10
+			23 01 00 00 00 00 02 a9 20
+			23 01 00 00 00 00 02 83 aa
+			23 01 00 00 00 00 02 84 11
+			23 01 00 00 00 00 02 a9 4b
+			23 01 00 00 00 00 02 85 04
+			23 01 00 00 00 00 02 86 08
+			23 01 00 00 00 00 02 9c 10
+			05 01 00 00 00 00 02 11 00
+			23 01 00 00 00 00 02 8f 00
+		];
+		qcom,mdss-dsi-off-command = [
+			23 01 00 00 00 00 02 83 00
+			23 01 00 00 78 00 02 84 00
+			05 01 00 00 78 00 02 10 00
+		];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-panel-timings = [
+			f2 3a 28 00 6c 70 2c 3e 2e 03 04 00];
+		qcom,mdss-dsi-t-clk-post = <0x0e>;
+		qcom,mdss-dsi-t-clk-pre = <0x33>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-force-clock-lane-hs;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-pan-physical-width-dimension = <135>;
+		qcom,mdss-pan-physical-height-dimension = <216>;
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-post-init-delay = <1>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi
index 237684e..89c5178 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399c-hd-plus-video.dtsi
@@ -20,13 +20,13 @@
 		qcom,mdss-dsi-stream = <0>;
 		qcom,mdss-dsi-panel-width = <720>;
 		qcom,mdss-dsi-panel-height = <1440>;
-		qcom,mdss-dsi-h-front-porch = <24>;
-		qcom,mdss-dsi-h-back-porch = <24>;
+		qcom,mdss-dsi-h-front-porch = <48>;
+		qcom,mdss-dsi-h-back-porch = <48>;
 		qcom,mdss-dsi-h-pulse-width = <16>;
 		qcom,mdss-dsi-h-sync-skew = <0>;
 		qcom,mdss-dsi-v-back-porch = <40>;
-		qcom,mdss-dsi-v-front-porch = <36>;
-		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-v-front-porch = <60>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
 		qcom,mdss-dsi-h-left-border = <0>;
 		qcom,mdss-dsi-h-right-border = <0>;
 		qcom,mdss-dsi-v-top-border = <0>;
@@ -40,21 +40,21 @@
 				b9 ff 83 99
 			39 01 00 00 00 00 02
 				d2 88
-			39 01 00 00 00 00 10
-				b1 02 04 74 94 01 32 33
-				11 11 e6 5d 56 73 02 02
+			39 01 00 00 00 00 0c
+				b1 02 04 72 92 01
+				32 aa 11 11 52 57
 			39 01 00 00 00 00 10
 				b2 00 80 80 cc 05 07 5a
-				11 10 10 00 1e 70 03 D4
+				11 10 10 00 1e 70 03 d4
 			39 01 00 00 00 00 2d
-				b4 00 ff 59 59 0c ac 00
-				00 0c 00 07 0a 00 28 07
-				08 0c 21 03 00 00 00 ae
-				87 59 59 0c ac 00 00 0c
-				00 07 0a 00 28 07 08 0c
-				01 00 00 ae 01
+				b4 00 ff 59 59 01 ab 00
+				00 09 00 03 05 00 28 03
+				0b 0d 21 03 02 00 0c a3
+				80 59 59 02 ab 00 00 09
+				00 03 05 00 28 03 0b 0d
+				02 00 0c a3 01
 			39 01 00 00 05 00 22
-				d3 00 00 01 01 00 00 10
+				d3 00 0c 03 03 00 00 10
 				10 00 00 03 00 03 00 08
 				78 08 78 00 00 00 00 00
 				24 02 05 05 03 00 00 00
@@ -80,8 +80,8 @@
 			39 01 00 00 00 00 02
 				bd 01
 			39 01 00 00 00 00 11
-				d8 82 ea aa aa 82 ea aa
-				aa 82 ea aa aa 82 ea aa
+				d8 00 00 00 00 00 00 00
+				00 82 ea aa aa 82 ea aa
 				aa
 			39 01 00 00 00 00 02
 				bd 02
@@ -90,25 +90,23 @@
 				3f
 			39 01 00 00 00 00 02
 				bd 00
-			39 01 00 00 00 00 02
-				dd 03
 			39 01 00 00 05 00 37
-				e0 08 2a 39 35 74 7c 87
-				7f 84 8a 8e 91 93 96 9b
-				9c 9e a5 a6 ae a1 af b2
-				5c 58 63 74 08 2a 39 35
-				74 7c 87 7f 84 8a 8e 91
-				93 96 9b 9c 9e a5 a6 ae
-				a1 af b2 5c 58 63 74
+				e0 01 21 31 2d 66 6f 7b
+				75 7a 81 86 89 8c 90 95
+				97 9a a1 a2 aa 9e ad b0
+				5b 57 63 7a 01 21 31 2d
+				66 6f 7b 75 7a 81 86 89
+				8c 90 95 97 9a a1 a2 aa
+				9e ad b0 5b 57 63 7a
 			39 01 00 00 00 00 03
 				b6 7e 7e
 			39 01 00 00 00 00 02
 				cc 08
-			39 01 00 00 00 00 06
-				c7 00 08 00 01 08
-			39 01 00 00 00 00 03
-				c0 25 5a
-			05 01 00 00 78 00 02 11 00
+			39 01 00 00 00 00 02
+				35 00
+			39 01 00 00 00 00 02
+				dd 03
+			05 01 00 00 96 00 02 11 00
 			05 01 00 00 14 00 02 29 00];
 		qcom,mdss-dsi-off-command = [
 			05 01 00 00 14 00 02 28 00
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi
new file mode 100644
index 0000000..d50fe3b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi
@@ -0,0 +1,98 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_icn9706_720_1440_vid: qcom,mdss_dsi_icn9706_720_1440p_video {
+		qcom,mdss-dsi-panel-name =
+			"icn9706 720 1440p video mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-destination = "display_1";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <1440>;
+		qcom,mdss-dsi-h-front-porch = <84>;
+		qcom,mdss-dsi-h-back-porch = <84>;
+		qcom,mdss-dsi-h-pulse-width = <24>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <20>;
+		qcom,mdss-dsi-v-front-porch = <24>;
+		qcom,mdss-dsi-v-pulse-width = <8>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-panel-timings = [8b 1e 14 00 44 48 18 22 19
+						03 04 00];
+		qcom,mdss-dsi-t-clk-post = <0x04>;
+		qcom,mdss-dsi-t-clk-pre = <0x1c>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-on-command = [39 01 00 00 64 00 02 01 00
+			39 01 00 00 00 00 03 f0 5a 5a
+			39 01 00 00 00 00 03 f1 5a 5a
+			39 01 00 00 00 00 03 f0 b4 4b
+			39 01 00 00 00 00 03 b6 10 10
+			39 01 00 00 00 00 15 b4 0a 08 12 10 0e 0c 00 00
+				00 03 00 03 03 03 03 03 03 03 04 06
+			39 01 00 00 00 00 15 b3 0b 09 13 11 0f 0d 00 00
+				00 03 00 03 03 03 03 03 03 03 05 07
+			39 01 00 00 00 00 0d b0 54 32 23 45 44 44 44 44
+				60 01 60 01
+			39 01 00 00 00 00 09 b1 32 84 02 83 15 01 57 01
+			39 01 00 00 00 00 02 b2 33
+			39 01 00 00 00 00 07 bd 54 14 6a 6a 20 19
+			39 01 00 00 00 00 12 b7 01 01 09 11 0d 15 19 0d
+				21 1d 00 00 20 00 02 ff 3c
+			39 01 00 00 00 00 06 b8 23 01 30 34 53
+			39 01 00 00 00 00 05 b9 a1 2c ff c4
+			39 01 00 00 00 00 03 ba 88 23
+			39 01 00 00 00 00 07 c1 16 16 04 0c 10 04
+			39 01 00 00 00 00 03 c2 12 68
+			39 01 00 00 00 00 04 c3 22 31 04
+			39 01 00 00 00 00 06 c7 05 23 6b 41 00
+			39 01 00 00 00 00 27 c8 7c 54 3d 2d 26 16 1b 08
+				25 28 2d 4f 3e 48 3d 3d 35 25 06 7c 54 3d 2d
+				26 16 1b 08 25 28 2d 4f 3e 48 3d 3d 35 25 06
+			39 01 00 00 00 00 09 c6 00 00 68 00 00 60 36 00
+			05 01 00 00 64 00 02 11 00
+			05 01 00 00 32 00 02 29 00];
+
+		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+			05 01 00 00 32 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+		qcom,esd-check-enabled;
+		qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-reset-sequence = <1 2>, <0 20>, <1 50>;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-pan-physical-width-dimension = <63>;
+		qcom,mdss-pan-physical-height-dimension = <112>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp466076-3250mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp466076-3250mah.dtsi
new file mode 100644
index 0000000..2f48301
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp466076-3250mah.dtsi
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,mlp466076_3250mah_averaged_masterslave_jun15th2018 {
+	/* #mlp466076_3250mAh_averaged_MasterSlave_Jun15th2018 */
+	qcom,max-voltage-uv = <4400000>;
+	qcom,nom-batt-capacity-mah = <3250>;
+	qcom,fastchg-current-ma = <6000000>;
+	qcom,jeita-fcc-ranges = <0  150  650000
+				151 450  4875000
+				451 550  1625000>;
+	qcom,jeita-fv-ranges =  <0   150 4150000
+				151 450 4400000
+				451 550 4150000>;
+	qcom,batt-id-kohm = <133>;
+	qcom,battery-beta = <4250>;
+	qcom,fg-cc-cv-threshold-mv = <4390>;
+	qcom,battery-type = "mlp466076_3250mah_jun15th2018";
+	qcom,checksum = <0x8905>;
+	qcom,gui-version = "PM660GUI - 0.0.0.45";
+	qcom,fg-profile-data = [
+		5E 21 D2 0D
+		E3 0B 04 05
+		EC 1C 8B 01
+		4F 05 31 03
+		80 18 D2 22
+		C2 45 73 52
+		90 00 00 00
+		13 00 00 00
+		00 00 82 C3
+		A3 CC 92 BC
+		2F 00 08 00
+		14 DA CE E5
+		B0 04 41 02
+		C5 F4 C4 12
+		0C 07 3F 32
+		2B 06 09 20
+		27 00 14 00
+		4C 20 E0 04
+		1A 0B A1 05
+		C4 1C E7 02
+		3E 0C 02 12
+		9D 18 4C 23
+		DC 44 15 5A
+		70 00 00 00
+		10 00 00 00
+		00 00 F6 07
+		1D CB 02 B4
+		20 00 00 00
+		5B E3 CE E5
+		C8 05 54 01
+		A6 06 BD FB
+		35 F4 47 23
+		C5 33 CC FF
+		07 10 00 00
+		38 0D 66 46
+		20 00 40 00
+		61 01 0A FA
+		FF 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+		00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 9467297..13e5187 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -304,6 +304,7 @@
 
 	audio_apr: qcom,msm-audio-apr {
 		compatible = "qcom,msm-audio-apr";
+		qcom,subsys-name = "apr_adsp";
 	};
 
 	dai_pri_auxpcm: qcom,msm-pri-auxpcm {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
index dc95570..96d9ea7 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
@@ -19,26 +19,15 @@
 	/* To use BIMC based bus governor */
 	gpubw: qcom,gpubw {
 		compatible = "qcom,devbw";
-		governor = "bw_hwmon";
+		governor = "bw_vbif";
 		qcom,src-dst-ports = <26 512>;
 		qcom,bw-tbl =
 			<    0 >,	/*   9.6 MHz */
-			<  381 >,	/*  50.0 MHz */
-			<  762 >,	/* 100.0 MHz */
 			< 1525 >,	/* 200.0 MHz */
 			< 3051 >,	/* 400.0 MHz */
 			< 4066 >;	/* 533.0 MHz */
 	};
 
-	qcom,gpu-bwmon@410000 {
-		compatible = "qcom,bimc-bwmon2";
-		reg = <0x00410000 0x300>, <0x00401000 0x200>;
-		reg-names = "base", "global_base";
-		interrupts = <0 183 4>;
-		qcom,mport = <2>;
-		qcom,target-dev = <&gpubw>;
-	};
-
 	msm_gpu: qcom,kgsl-3d0@01c00000 {
 		label = "kgsl-3d0";
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
@@ -92,6 +81,9 @@
 		qcom,pm-qos-active-latency = <701>;
 		qcom,pm-qos-wakeup-latency = <701>;
 
+		/* Enable gpu cooling device */
+		#cooling-cells = <2>;
+
 		/* Power levels */
 		qcom,gpu-pwrlevels {
 			#address-cells = <1>;
@@ -103,24 +95,32 @@
 				reg = <0>;
 				qcom,gpu-freq = <456000000>;
 				qcom,bus-freq = <3>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <3>;
 			};
 
 			qcom,gpu-pwrlevel@1 {
 				reg = <1>;
 				qcom,gpu-freq = <307200000>;
 				qcom,bus-freq = <2>;
+				qcom,bus-min = <2>;
+				qcom,bus-max = <3>;
 			};
 
 			qcom,gpu-pwrlevel@2 {
 				reg = <2>;
 				qcom,gpu-freq = <200000000>;
-				qcom,bus-freq = <1>;
+				qcom,bus-freq = <2>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <2>;
 			};
 
 			qcom,gpu-pwrlevel@3 {
 				reg = <3>;
 				qcom,gpu-freq = <19200000>;
 				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
index c86da64..743cf1c 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
@@ -545,6 +545,33 @@
 			};
 		};
 
+		imu {
+			imu_int_active: imu_int_active {
+				mux {
+					pins = "gpio12";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio12";
+					drive-strength = <6>;
+					bias-pull-up;
+				};
+			};
+
+			imu_int_suspend: imu_int_suspend {
+				mux {
+					pins = "gpio12";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio12";
+					drive-strength = <6>;
+					bias-pull-up;
+				};
+			};
+
+		};
+
 		nfc {
 			nfcw_int_active: nfcw_int_active {
 				mux {
@@ -1890,6 +1917,34 @@
 			};
 		};
 
+		cdc_reset_ctrl {
+			cdc_reset_sleep: cdc_reset_sleep {
+				mux {
+					pins = "gpio27";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio27";
+					drive-strength = <16>;
+					bias-disable;
+					output-low;
+				};
+			};
+			cdc_reset_active: cdc_reset_active {
+				mux {
+					pins = "gpio27";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio27"; /* gpio67 old */
+					drive-strength = <16>;
+					bias-pull-down;
+					output-high;
+				};
+			};
+		};
+
+
 		cdc-dmic-lines {
 			cdc_dmic0_clk_act: dmic0_clk_on {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi
new file mode 100644
index 0000000..6f6655a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+	status = "ok";
+};
+
+&i2c_3 {
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x2>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vdig-supply = <&pm8916_l2>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
+	qcom,camera@1 {
+		cell-index = <1>;
+		compatible = "qcom,camera";
+		reg = <0x1>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vio","cam_vana";
+		qcom,cam-vreg-min-voltage = <1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1800000 2850000>;
+		qcom,cam-vreg-op-mode = <0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi
new file mode 100644
index 0000000..0b648ec
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,msm-cam@1800000 {
+		compatible = "qcom,msm-cam";
+		reg = <0x1b00000 0x40000>;
+		reg-names = "msm-cam";
+		status = "ok";
+		bus-vectors = "suspend", "svs", "nominal", "turbo";
+		qcom,bus-votes = <0 320000000 640000000 640000000>;
+	};
+
+	qcom,csiphy@1b0ac00 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v3.1", "qcom,csiphy";
+		reg = <0x1b0ac00 0x200>,
+			<0x1b00030 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_csi0phytimer_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phytimer_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phy_clk>,
+			<&clock_gcc clk_gcc_camss_csi1phy_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ahb_src", "csi0_phy_clk", "csi1_phy_clk",
+			"camss_ahb_clk";
+		qcom,clock-rates = <0 0 200000000 0 0 0 0 0>;
+	};
+
+	qcom,csid@1b08000 {
+		cell-index = <0>;
+		compatible = "qcom,csid-v3.1", "qcom,csid";
+		reg = <0x1b08000 0x100>;
+		reg-names = "csid";
+		interrupts = <0 49 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi0_ahb_clk>,
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "ispif_ahb_clk", "camss_top_ahb_clk",
+			"csi_ahb_clk", "csi_src_clk",
+			"csi_clk",  "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <40000000 0 0 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b08400 {
+		cell-index = <1>;
+		compatible = "qcom,csid-v3.1", "qcom,csid";
+		reg = <0x1b08400 0x100>;
+		reg-names = "csid";
+		interrupts = <0 50 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi1_ahb_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi1phy_clk>;
+		clock-names = "ispif_ahb_clk", "camss_top_ahb_clk",
+			"csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk", "camss_csi1_phy";
+		qcom,clock-rates = <40000000 0 0 200000000 0 0 0 0 0>;
+	};
+
+	qcom,ispif@1b0a000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif";
+		reg = <0x1b0a000 0x500>,
+			<0x1b00020 0x10>;
+		reg-names = "ispif", "csi_clk_mux";
+		interrupts = <0 51 0>;
+		interrupt-names = "ispif";
+		qcom,num-isps = <0x1>;
+		vfe0_vdd_supply = <&gdsc_vfe>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>;
+
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csi0_src_clk", "csi0_clk",
+			"csi0_rdi_clk", "csi0_pix_clk",
+			"csi1_src_clk", "csi1_clk",
+			"csi1_rdi_clk", "csi1_pix_clk",
+			"vfe0_clk_src", "camss_vfe_vfe0_clk",
+			"camss_csi_vfe0_clk";
+		qcom,clock-rates = <0 40000000
+			200000000 0 0 0
+			200000000 0 0 0
+			0 0 0>;
+		qcom,clock-control = "NO_SET_RATE", "SET_RATE",
+			"SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"INIT_RATE", "NO_SET_RATE", "NO_SET_RATE";
+	};
+
+	qcom,vfe@1b10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe32";
+		reg = <0x1b10000 0x830>,
+			<0x1b40000 0x200>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 52 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "vfe_clk_src",
+			"camss_vfe_vfe_clk", "camss_csi_vfe_clk", "iface_clk",
+			"bus_clk", "camss_ahb_clk", "ispif_ahb_clk";
+		qcom,clock-rates = <40000000 266670000 0 0 0 0 0 0>;
+
+		qos-entries = <8>;
+		qos-regs = <0x7BC 0x7C0 0x7C4 0x7C8 0x7CC 0x7D0
+				0x7D4 0x798>;
+		qos-settings = <0xAAA5AAA5 0xAAA5AAA5 0xAAA5AAA5
+				0xAAA5AAA5 0xAAA5AAA5 0xAAA5AAA5
+				0xAAA5AAA5 0x00010000>;
+		vbif-entries = <1>;
+		vbif-regs = <0x04>;
+		vbif-settings = <0x1>;
+		ds-entries = <15>;
+		ds-regs = <0x7D8 0x7DC 0x7E0 0x7E4 0x7E8
+			0x7EC 0x7F0 0x7F4 0x7F8 0x7FC 0x800
+			0x804 0x808 0x80C 0x810>;
+		ds-settings = <0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0xCCCC1111
+				0xCCCC1111 0xCCCC1111 0x00000103>;
+
+		bus-util-factor = <1024>;
+	};
+
+	qcom,cam_smmu {
+		status = "ok";
+		compatible = "qcom,msm-cam-smmu";
+		msm_cam_smmu_cb1: msm_cam_smmu_cb1 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x400 0x00>;
+			label = "vfe";
+			qcom,scratch-buf-support;
+		};
+	};
+
+	qcom,irqrouter@1b00000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,irqrouter";
+		reg = <0x1b00000 0x100>;
+		reg-names = "irqrouter";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi
index 7197f88..70d4939 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-regulator.dtsi
@@ -48,6 +48,12 @@
 			qcom,init-voltage = <1>;
 			qcom,use-voltage-corner;
 		};
+		pm8909_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8909_s1_floor_corner>;
+			regulator-levels = <5 1>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa2 {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-thermal.dtsi b/arch/arm64/boot/dts/qcom/msm8909-thermal.dtsi
new file mode 100644
index 0000000..21c393b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-thermal.dtsi
@@ -0,0 +1,477 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+};
+
+&thermal_zones {
+	mdm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 0>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 1>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	cpu0-2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 3>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	cpu1-3-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 4>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pop-mem-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 0>;
+		thermal-governor = "step_wise";
+		trips {
+			pop_trip: pop-trip {
+				temperature = <75000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			pop_cdev0 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev1 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev2 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev3 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	gpu-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 2>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_step_trip: gpu-step-trip {
+				temperature = <80000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			gpu_cdev0 {
+				trip = <&gpu_step_trip>;
+				cooling-device =
+					<&msm_gpu THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	cpu0-2-step {
+		polling-delay-passive = <65>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 3>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu0_2_step_trip: cpu0-2-step-trip {
+				temperature = <85000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu0_2_step_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	cpu1-3-step {
+		polling-delay-passive = <65>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu1_3_step_trip: cpu1-3-step-trip {
+				temperature = <85000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu1_3_step_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+	mdm-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 0>;
+		tracks-low;
+		trips {
+			mdm_trip: mdm-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&mdm_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	camera-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 1>;
+		tracks-low;
+		trips {
+			camera_trip: camera-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	gpu-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 2>;
+		tracks-low;
+		trips {
+			gpu_trip: gpu-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu0-2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 3>;
+		tracks-low;
+		trips {
+			cpu0_2_trip: cpu0-2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu0_2_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu1-3-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 4>;
+		tracks-low;
+		trips {
+			cpu1_3_trip: cpu1-3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU0 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu1_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU1 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu2_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU2 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cpu3_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device =
+					<&CPU3 (THERMAL_MAX_LIMIT-2)
+						(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu1_3_trip>;
+				cooling-device = <&pm8909_cx_cdev 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909.dtsi b/arch/arm64/boot/dts/qcom/msm8909.dtsi
index a72fdbf..c48756c 100644
--- a/arch/arm64/boot/dts/qcom/msm8909.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909.dtsi
@@ -77,7 +77,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu0_slp_sts>;
-			qcom,limits-info = <&mitigation_profile0>;
+			#cooling-cells = <2>;
 		};
 
 		CPU1: cpu@1 {
@@ -87,7 +87,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu1_slp_sts>;
-			qcom,limits-info = <&mitigation_profile2>;
+			#cooling-cells = <2>;
 		};
 
 		CPU2: cpu@2 {
@@ -97,7 +97,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu2_slp_sts>;
-			qcom,limits-info = <&mitigation_profile1>;
+			#cooling-cells = <2>;
 		};
 
 		CPU3: cpu@3 {
@@ -107,7 +107,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			qcom,sleep-status = <&cpu3_slp_sts>;
-			qcom,limits-info = <&mitigation_profile2>;
+			#cooling-cells = <2>;
 		};
 	};
 
@@ -357,6 +357,7 @@
 		reg = <0x1800000 0x80000>,
 		      <0xb016000 0x00040>;
 		reg-names = "cc_base", "apcs_base";
+		qcom,gfx3d_clk_src-opp-store-vcorner = <&msm_gpu>;
 		vdd_dig-supply = <&pm8909_s1_corner>;
 		vdd_sr2_dig-supply = <&pm8909_s1_corner_ao>;
 		vdd_sr2_pll-supply = <&pm8909_l7_ao>;
@@ -559,77 +560,16 @@
 
 	};
 
-	qcom,sensor-information {
-		compatible = "qcom,sensor-information";
-		sensor_information0: qcom,sensor-information-0 {
-			qcom,sensor-type = "tsens";
-			qcom,sensor-name = "tsens_tz_sensor0";
-			qcom,alias-name = "pop_mem";
-		};
-
-		sensor_information1: qcom,sensor-information-1 {
-			qcom,sensor-type =  "tsens";
-			qcom,sensor-name = "tsens_tz_sensor1";
-		};
-
-		sensor_information2: qcom,sensor-information-2 {
-			qcom,sensor-type =  "tsens";
-			qcom,sensor-name = "tsens_tz_sensor2";
-		};
-
-		sensor_information3: qcom,sensor-information-3 {
-			qcom,sensor-type =  "tsens";
-			qcom,sensor-name = "tsens_tz_sensor3";
-		};
-
-		sensor_information4: qcom,sensor-information-4 {
-			qcom,sensor-type = "tsens";
-			qcom,sensor-name = "tsens_tz_sensor4";
-		};
-
-		sensor_information5: qcom,sensor-information-5 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "pa_therm0";
-		};
-
-		sensor_information6: qcom,sensor-information-6 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "case_therm";
-		};
-
-		sensor_information7: qcom,sensor-information-7 {
-			qcom,sensor-type = "alarm";
-			qcom,sensor-name = "pm8909_tz";
-			qcom,scaling-factor = <1000>;
-		};
-
-		sensor_information8: qcom,sensor-information-8 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "xo_therm";
-		};
-
-		sensor_information9: qcom,sensor-information-9 {
-			qcom,sensor-type = "adc";
-			qcom,sensor-name = "xo_therm_buf";
-		};
-	};
-
-	mitigation_profile0: qcom,limit_info-0 {
-		qcom,temperature-sensor = <&sensor_information3>;
-		qcom,boot-frequency-mitigate;
-		qcom,emergency-frequency-mitigate;
-	};
-
-	mitigation_profile1: qcom,limit_info-1 {
-		qcom,temperature-sensor = <&sensor_information3>;
-		qcom,boot-frequency-mitigate;
-		qcom,hotplug-mitigation-enable;
-	};
-
-	mitigation_profile2: qcom,limit_info-2 {
-		qcom,temperature-sensor = <&sensor_information4>;
-		qcom,boot-frequency-mitigate;
-		qcom,hotplug-mitigation-enable;
+	tsens0: tsens@4a8000 {
+		compatible = "qcom,msm8909-tsens";
+		reg = <0x4a8000 0x1000>,
+			<0x4a9000 0x1000>,
+			<0x5c000  0x1000>;
+		reg-names = "tsens_srot_physical",
+			"tsens_tm_physical", "tsens_eeprom_physical";
+		interrupts = <0 184 0>;
+		interrupt-names = "tsens-upper-lower";
+		#thermal-sensor-cells = <1>;
 	};
 
 	qcom,ipc-spinlock@1905000 {
@@ -1412,7 +1352,7 @@
 		clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
 			 <&clock_gcc clk_gcc_blsp1_qup1_i2c_apps_clk>;
 		clock-names = "iface_clk", "core_clk";
-		qcom,clk-freq-out = <100000>;
+		qcom,clk-freq-out = <400000>;
 		qcom,clk-freq-in  = <19200000>;
 		pinctrl-names = "i2c_active", "i2c_sleep";
 		pinctrl-0 = <&i2c_1_active>;
@@ -2003,3 +1943,4 @@
 	clocks = <&clock_gcc clk_gcc_oxili_gfx3d_clk>;
 	status = "okay";
 };
+#include "msm8909-thermal.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts b/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
index 9dd80f0..255c146 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
+++ b/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
@@ -54,6 +54,8 @@
 			synaptics,power-delay-ms = <200>;
 			synaptics,reset-delay-ms = <200>;
 			synaptics,max-y-for-2d = <389>;
+			synaptics,bus-lpm-cur-uA = <450>;
+			synaptics,do-not-disable-regulators;
 			synaptics,wakeup-gestures-en;
 			synaptics,resume-in-workqueue;
 			synaptics,x-flip;
@@ -71,6 +73,7 @@
 
 	qcom,blackghost {
 		compatible = "qcom,pil-blackghost";
+		qcom,pil-force-shutdown;
 		qcom,firmware-name = "bg-wear";
 		/* GPIO inputs from blackghost */
 		qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -221,6 +224,7 @@
 			     &nfcw_disable_active
 			     &nfc_clk_default>;
 		pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk3_pin>;
 		clock-names = "ref_clk";
 	};
 };
@@ -241,6 +245,33 @@
 	status = "disabled";
 };
 
+&sdc1_clk_off {
+	config {
+		pins = "sdc1_clk";
+		bias-disable; /* NO pull */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
+&sdc1_cmd_off {
+	config {
+		pins = "sdc1_cmd";
+		bias-disable; /* NO pull */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
+&sdc1_data_off {
+	config {
+		pins = "sdc1_data";
+		bias-disable; /* NO pull */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
 &sdhc_2 {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
index 512b0fb..ecf28c5 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
@@ -52,6 +52,13 @@
 			qcom,init-voltage = <1>;
 			qcom,use-voltage-corner;
 		};
+
+		pm660_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm660_s2_floor_corner>;
+			regulator-levels = <5 1>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	/* MX supply */
diff --git a/arch/arm64/boot/dts/qcom/msm8909w.dtsi b/arch/arm64/boot/dts/qcom/msm8909w.dtsi
index c2e28d1..7229564 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w.dtsi
@@ -105,3 +105,127 @@
 	qcom,platform-reset-gpio = <&msm_gpio 25 0>;
 	qcom,platform-bklight-en-gpio = <&msm_gpio 37 0>;
 };
+
+&thermal_zones {
+	gpu-step {
+		trips {
+			gpu-step-trip {
+				temperature = <70000>;
+			};
+		};
+	};
+	cpu0-2-step {
+		trips {
+			cpu0-2-step-trip {
+				temperature = <60000>;
+			};
+		};
+	};
+	cpu1-3-step {
+		trips {
+			cpu1-3-step-trip {
+				temperature = <60000>;
+			};
+		};
+	};
+
+	case-therm-step {
+		polling-delay-passive = <5000>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm660_adc_tm 0x51>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu_freq_trip: cpu-freq-trip {
+				temperature = <40000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			modem_mon_trip0: modem-mon-trip0 {
+				temperature = <47000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			modem_mon_trip1: modem-mon-trip1 {
+				temperature = <55000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			cpu1_hotplug_trip: cpu1-hotplug-trip {
+				temperature = <49000>;
+				hysteresis = <3000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT 2>;
+			};
+			cpu1_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT 2>;
+			};
+			cpu2_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT 2>;
+			};
+			cpu3_cdev {
+				trip = <&cpu_freq_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT 2>;
+			};
+			modem_lvl1 {
+				trip = <&modem_mon_trip0>;
+				cooling-device = <&modem_pa 2 2>;
+			};
+			modem_lvl2 {
+				trip = <&modem_mon_trip1>;
+				cooling-device = <&modem_pa 3 3>;
+			};
+			hotplug_cpu1_cdev {
+				trip = <&cpu1_hotplug_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+	mdm-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	camera-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	gpu-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu0-2-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+	cpu1-3-lowf {
+		cooling-maps {
+			cx_vdd_cdev {
+				cooling-device = <&pm660_cx_cdev 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi
index 0313ebd..36a67af 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-regulator.dtsi
@@ -141,6 +141,12 @@
 			qcom,use-voltage-floor-corner;
 			qcom,always-send-voltage;
 		};
+		pm8916_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8916_s1_floor_corner>;
+			regulator-levels = <5 1>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa3 {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
new file mode 100644
index 0000000..33cc8ae
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+cci {
+	cci0_active: cci0_active {
+		/* cci0 active state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio29", "gpio30";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio29", "gpio30";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+
+	cci0_suspend: cci0_suspend {
+		/* cci0 suspended state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio29", "gpio30";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio29", "gpio30";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+
+	cci1_active: cci1_active {
+		/* cci1 active state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio31", "gpio32";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio31", "gpio32";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+
+	cci1_suspend: cci1_suspend {
+		/* cci1 suspended state */
+		mux {
+			/* CLK, DATA */
+			pins = "gpio31", "gpio32";
+			function = "cci_i2c";
+		};
+
+		config {
+			pins = "gpio31", "gpio32";
+			drive-strength = <2>; /* 2 MA */
+			bias-disable; /* No PULL */
+		};
+	};
+};
+
+/* sensors */
+cam_sensor_mclk0_default: cam_sensor_mclk0_default {
+	/* MCLK0 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio26";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio26";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk0_sleep: cam_sensor_mclk0_sleep {
+	/* MCLK0 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio26";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio26";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_default: cam_sensor_rear_default {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio36", "gpio35";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio36","gpio35";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_sleep: cam_sensor_rear_sleep {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio36","gpio35";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio36","gpio35";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_vdig: cam_sensor_rear_vdig {
+	/* VDIG */
+	mux {
+		pins = "gpio62";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio62";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_rear_vdig_sleep: cam_sensor_rear_vdig_sleep {
+	/* VDIG */
+	mux {
+		pins = "gpio62";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio62";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk1_default: cam_sensor_mclk1_default {
+	/* MCLK1 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio27";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio27";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk1_sleep: cam_sensor_mclk1_sleep {
+	/* MCLK1 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio27";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio27";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front_default: cam_sensor_front_default {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio38","gpio50";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio38","gpio50";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front_sleep: cam_sensor_front_sleep {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio38","gpio50";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio38","gpio50";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk2_default: cam_sensor_mclk2_default {
+	/* MCLK2 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio28";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio28";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_mclk2_sleep: cam_sensor_mclk2_sleep {
+	/* MCLK2 */
+	mux {
+		/* CLK, DATA */
+		pins = "gpio28";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio28";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front1_default: cam_sensor_front1_default {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio40", "gpio39";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio40", "gpio39";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+cam_sensor_front1_sleep: cam_sensor_front1_sleep {
+	/* RESET, STANDBY */
+	mux {
+		pins = "gpio40", "gpio39";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio40", "gpio39";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..dbaccfa
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera-sensor-qrd.dtsi
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&cci {
+	actuator0: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		qcom,cci-master = <0>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vaf";
+		qcom,cam-vreg-min-voltage = <2850000>;
+		qcom,cam-vreg-max-voltage = <2850000>;
+		qcom,cam-vreg-op-mode = <80000>;
+	};
+
+	eeprom0: qcom,eeprom@0 {
+		cell-index = <0>;
+		compatible = "qcom,eeprom";
+		qcom,cci-master = <0>;
+		reg = <0x0>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+							"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&tlmm 26 0>,
+			<&tlmm 36 0>,
+			<&tlmm 35 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+			"CAM_RESET0",
+			"CAM_STANDBY0";
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <19200000 0>;
+	};
+
+	eeprom2: qcom,eeprom@2 {
+		cell-index = <2>;
+		compatible = "qcom,eeprom";
+		reg = <0x02>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+					"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+		qcom,gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_default
+				&cam_sensor_front1_default>;
+		pinctrl-1 = <&cam_sensor_mclk2_sleep
+				&cam_sensor_front1_sleep>;
+		gpios = <&tlmm 28 0>,
+			<&tlmm 40 0>,
+			<&tlmm 39 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+					  "CAM_RESET2",
+					  "CAM_STANDBY2";
+		qcom,cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk2_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk2_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <19200000 0>;
+	};
+
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x0>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		qcom,actuator-src = <&actuator0>;
+		qcom,led-flash-src = <&led_flash0>;
+		qcom,eeprom-src = <&eeprom0>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		cam_vaf-supply = <&pm8937_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+							"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep
+				&cam_sensor_rear_sleep>;
+		gpios = <&tlmm 26 0>,
+			<&tlmm 36 0>,
+			<&tlmm 35 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+			"CAM_RESET0",
+			"CAM_STANDBY0";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <0>;
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
+	qcom,camera@2 {
+		cell-index = <2>;
+		compatible = "qcom,camera";
+		reg = <0x02>;
+		qcom,csiphy-sd-index = <1>;
+		qcom,csid-sd-index = <1>;
+		qcom,eeprom-src = <&eeprom2>;
+		qcom,mount-angle = <270>;
+		cam_vdig-supply = <&pm8937_l23>;
+		cam_vana-supply = <&pm8937_l22>;
+		cam_vio-supply = <&pm8937_l6>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000>;
+		qcom,gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_default
+				&cam_sensor_front1_default>;
+		pinctrl-1 = <&cam_sensor_mclk2_sleep
+				&cam_sensor_front1_sleep>;
+		gpios = <&tlmm 28 0>,
+			<&tlmm 40 0>,
+			<&tlmm 39 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+					  "CAM_RESET2",
+					  "CAM_STANDBY2";
+		qcom,sensor-position = <1>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <0>;
+		clocks = <&clock_gcc clk_mclk2_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk2_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera.dtsi
new file mode 100644
index 0000000..4991ff7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera.dtsi
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,msm-cam@1b00000 {
+		compatible = "qcom,msm-cam";
+		reg = <0x1b00000 0x40000>;
+		reg-names = "msm-cam";
+		status = "ok";
+		bus-vectors = "suspend", "svs", "nominal", "turbo";
+		qcom,bus-votes = <0 160000000 320000000 320000000>;
+	};
+
+	qcom,csiphy@1b34000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+		reg = <0x1b34000 0x1000>,
+			<0x1b00030 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_csi0phytimer_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phytimer_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0phy_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ahb_src", "csi_phy_clk",
+			"camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 200000000 0 0 0 0>;
+	};
+
+	qcom,csiphy@1b35000 {
+		status = "ok";
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+		reg = <0x1b35000 0x1000>,
+			<0x1b00038 0x4>;
+		reg-names = "csiphy", "csiphy_clk_mux";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_csi1phytimer_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1phytimer_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1phy_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+			"csiphy_timer_src_clk", "csiphy_timer_clk",
+			"camss_ahb_src", "csi_phy_clk",
+			"camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b30000  {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,csid-v3.4.3", "qcom,csid";
+		reg = <0x1b30000 0x400>;
+		reg-names = "csid";
+		interrupts = <0 51 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8937_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi0_ahb_clk>,
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+			"csi_clk",  "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b30400 {
+		status = "ok";
+		cell-index = <1>;
+		compatible = "qcom,csid-v3.4.3", "qcom,csid";
+		reg = <0x1b30400 0x400>;
+		reg-names = "csid";
+		interrupts = <0 52 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8937_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi1_ahb_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+	};
+
+	qcom,csid@1b30800 {
+		status = "ok";
+		cell-index = <2>;
+		compatible = "qcom,csid-v3.4.3", "qcom,csid";
+		reg = <0x1b30800 0x400>;
+		reg-names = "csid";
+		interrupts = <0 153 0>;
+		interrupt-names = "csid";
+		qcom,csi-vdd-voltage = <1200000>;
+		qcom,mipi-csi-vdd-supply = <&pm8937_l2>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_csi2_ahb_clk>,
+			<&clock_gcc clk_csi2_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi2_clk>,
+			<&clock_gcc clk_gcc_camss_csi2pix_clk>,
+			<&clock_gcc clk_gcc_camss_csi2rdi_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "camss_top_ahb_clk",
+			"ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+			"csi_clk", "csi_pix_clk",
+			"csi_rdi_clk", "camss_ahb_clk";
+		qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+	};
+
+	qcom,ispif@1b31000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif-v3.0", "qcom,ispif";
+		reg = <0x1b31000 0x500>,
+			<0x1b00020 0x10>;
+		reg-names = "ispif", "csi_clk_mux";
+		interrupts = <0 55 0>;
+		interrupt-names = "ispif";
+		qcom,num-isps = <0x2>;
+		vfe0-vdd-supply = <&gdsc_vfe>;
+		vfe1-vdd-supply = <&gdsc_vfe1>;
+		qcom,vdd-names = "vfe0-vdd", "vfe1-vdd";
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_camss_top_ahb_clk_src>,
+			<&clock_gcc clk_csi0_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi0_clk>,
+			<&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi0pix_clk>,
+			<&clock_gcc clk_csi1_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi1_clk>,
+			<&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi1pix_clk>,
+			<&clock_gcc clk_csi2_clk_src>,
+			<&clock_gcc clk_gcc_camss_csi2_clk>,
+			<&clock_gcc clk_gcc_camss_csi2rdi_clk>,
+			<&clock_gcc clk_gcc_camss_csi2pix_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+			<&clock_gcc clk_vfe1_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe1_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe1_clk>;
+		clock-names = "ispif_ahb_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk",
+			"camss_ahb_src",
+			"csi0_src_clk", "csi0_clk",
+			"csi0_rdi_clk", "csi0_pix_clk",
+			"csi1_src_clk", "csi1_clk",
+			"csi1_rdi_clk", "csi1_pix_clk",
+			"csi2_src_clk", "csi2_clk",
+			"csi2_rdi_clk", "csi2_pix_clk",
+			"vfe0_clk_src", "camss_vfe_vfe0_clk",
+			"camss_csi_vfe0_clk", "vfe1_clk_src",
+			"camss_vfe_vfe1_clk", "camss_csi_vfe1_clk";
+		qcom,clock-rates = <61540000 0 0 0
+			200000000 0 0 0
+			200000000 0 0 0
+			200000000 0 0 0
+			0 0 0
+			0 0 0>;
+		qcom,clock-cntl-support;
+		qcom,clock-control = "SET_RATE","NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE", "SET_RATE",
+			"NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "INIT_RATE", "NO_SET_RATE",
+			"NO_SET_RATE", "INIT_RATE", "NO_SET_RATE",
+			"NO_SET_RATE";
+	};
+
+	vfe0: qcom,vfe0@1b10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe40";
+		reg = <0x1b10000 0x1000>,
+			<0x1b40000 0x200>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 57 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_vfe0_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>;
+		clock-names = "camss_top_ahb_clk", "camss_ahb_clk",
+			"vfe_clk_src", "camss_vfe_vfe_clk",
+			"camss_csi_vfe_clk", "iface_clk",
+			"bus_clk", "iface_ahb_clk";
+		qcom,clock-rates = <0 0 266670000 0 0 0 0 0>;
+		qos-entries = <8>;
+		qos-regs = <0x2c4 0x2c8 0x2cc 0x2d0 0x2d4 0x2d8
+			0x2dc 0x2e0>;
+		qos-settings = <0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x988 0x98c 0x990 0x994 0x998
+			0x99c 0x9a0 0x9a4 0x9a8 0x9ac 0x9b0
+			0x9b4 0x9b8 0x9bc 0x9c0 0x9c4 0x9c8>;
+		ds-settings = <0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0x00000110>;
+		max-clk-nominal = <400000000>;
+		max-clk-turbo = <432000000>;
+	};
+
+	vfe1: qcom,vfe1@1b14000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe40";
+		reg = <0x1b14000 0x1000>,
+			<0x1ba0000 0x200>;
+		reg-names = "vfe", "vfe_vbif";
+		interrupts = <0 29 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe1>;
+		clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_vfe1_clk_src>,
+			<&clock_gcc clk_gcc_camss_vfe1_clk>,
+			<&clock_gcc clk_gcc_camss_csi_vfe1_clk>,
+			<&clock_gcc clk_gcc_camss_vfe1_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_vfe1_axi_clk>,
+			<&clock_gcc clk_gcc_camss_ispif_ahb_clk>;
+		clock-names = "camss_top_ahb_clk" , "camss_ahb_clk",
+			"vfe_clk_src", "camss_vfe_vfe_clk",
+			"camss_csi_vfe_clk", "iface_clk",
+			"bus_clk", "iface_ahb_clk";
+		qcom,clock-rates = <0 0 266670000 0 0 0 0 0>;
+		qos-entries = <8>;
+		qos-regs = <0x2c4 0x2c8 0x2cc 0x2d0 0x2d4 0x2d8
+			0x2dc 0x2e0>;
+		qos-settings = <0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55 0xaa55aa55
+			0xaa55aa55>;
+		vbif-entries = <1>;
+		vbif-regs = <0x124>;
+		vbif-settings = <0x3>;
+		ds-entries = <17>;
+		ds-regs = <0x988 0x98c 0x990 0x994 0x998
+			0x99c 0x9a0 0x9a4 0x9a8 0x9ac 0x9b0
+			0x9b4 0x9b8 0x9bc 0x9c0 0x9c4 0x9c8>;
+		ds-settings = <0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0xcccc1111
+			0xcccc1111 0x00000110>;
+		max-clk-nominal = <400000000>;
+		max-clk-turbo = <432000000>;
+	};
+
+	qcom,vfe {
+		compatible = "qcom,vfe";
+		num_child = <2>;
+	};
+
+	qcom,cam_smmu {
+		status = "ok";
+		compatible = "qcom,msm-cam-smmu";
+		msm_cam_smmu_cb1: msm_cam_smmu_cb1 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x400 0x00>,
+				<&apps_iommu 0x2400 0x00>;
+			label = "vfe";
+			qcom,scratch-buf-support;
+		};
+
+		msm_cam_smmu_cb2: msm_cam_smmu_cb2 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			label = "vfe_secure";
+			qcom,secure-context;
+		};
+
+		msm_cam_smmu_cb3: msm_cam_smmu_cb3 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x1c00 0x00>;
+			label = "cpp";
+		};
+
+		msm_cam_smmu_cb4: msm_cam_smmu_cb4 {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_iommu 0x1800 0x00>;
+			label = "jpeg_enc0";
+		};
+	};
+
+	qcom,jpeg@1b1c000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,jpeg";
+		reg = <0x1b1c000 0x400>,
+			<0x1b60000 0xc30>;
+		reg-names = "jpeg_hw", "jpeg_vbif";
+		interrupts = <0 59 0>;
+		interrupt-names = "jpeg";
+		vdd-supply = <&gdsc_jpeg>;
+		qcom,vdd-names = "vdd";
+		clock-names =  "core_clk", "iface_clk", "bus_clk0",
+			"camss_top_ahb_clk", "camss_ahb_clk";
+		clocks = <&clock_gcc clk_gcc_camss_jpeg0_clk>,
+			<&clock_gcc clk_gcc_camss_jpeg_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_jpeg_axi_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		qcom,clock-rates = <266670000 0 0 0 0>;
+		qcom,qos-reg-settings = <0x28 0x0000555e>,
+			<0xc8 0x00005555>;
+		qcom,msm-bus,name = "msm_camera_jpeg0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+			<62 512 800000 800000>;
+		qcom,vbif-reg-settings = <0xc0 0x10101000>,
+			<0xb0 0x10100010>;
+	};
+
+	qcom,irqrouter@1b00000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,irqrouter";
+		reg = <0x1b00000 0x100>;
+		reg-names = "irqrouter";
+	};
+
+	qcom,cpp@1b04000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0x1b04000 0x100>,
+			<0x1b80000 0x200>,
+			<0x1b18000 0x018>,
+			<0x1858078 0x4>;
+		reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+		interrupts = <0 49 0>;
+		interrupt-names = "cpp";
+		vdd-supply = <&gdsc_cpp>;
+		qcom,vdd-names = "vdd";
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_cpp_clk_src>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_cpp_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_cpp_axi_clk>,
+			<&clock_gcc clk_gcc_camss_cpp_clk>,
+			<&clock_gcc clk_gcc_camss_micro_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>;
+		clock-names = "ispif_ahb_clk", "cpp_core_clk",
+			"camss_top_ahb_clk", "camss_vfe_cpp_ahb_clk",
+			"camss_vfe_cpp_axi_clk", "camss_vfe_cpp_clk",
+			"micro_iface_clk", "camss_ahb_clk";
+		qcom,clock-rates = <61540000 180000000 0 0 0 180000000 0 0>;
+		qcom,min-clock-rate = <133000000>;
+		resets = <&clock_gcc GCC_CAMSS_MICRO_BCR>;
+		reset-names = "micro_iface_reset";
+		qcom,bus-master = <1>;
+		qcom,msm-bus,name = "msm_camera_cpp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<106 512 0 0>,
+			<106 512 0 0>;
+		qcom,msm-bus-vector-dyn-vote;
+		qcom,micro-reset;
+		qcom,cpp-fw-payload-info {
+			qcom,stripe-base = <156>;
+			qcom,plane-base = <141>;
+			qcom,stripe-size = <27>;
+			qcom,plane-size = <5>;
+			qcom,fe-ptr-off = <5>;
+			qcom,we-ptr-off = <11>;
+		};
+	};
+
+	cci: qcom,cci@1b0c000 {
+		status = "ok";
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0x1b0c000 0x4000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "cci";
+		interrupts = <0 50 0>;
+		interrupt-names = "cci";
+		clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+			<&clock_gcc clk_cci_clk_src>,
+			<&clock_gcc clk_gcc_camss_cci_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_cci_clk>,
+			<&clock_gcc clk_gcc_camss_ahb_clk>,
+			<&clock_gcc clk_gcc_camss_top_ahb_clk>;
+		clock-names = "ispif_ahb_clk", "cci_src_clk",
+			"cci_ahb_clk", "camss_cci_clk",
+			"camss_ahb_clk", "camss_top_ahb_clk";
+		qcom,clock-rates = <61540000 19200000 0 0 0 0>,
+				<61540000 37500000 0 0 0 0>;
+		pinctrl-names = "cci_default", "cci_suspend";
+			pinctrl-0 = <&cci0_active &cci1_active>;
+			pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+		gpios = <&tlmm 29 0>,
+			<&tlmm 30 0>,
+			<&tlmm 31 0>,
+			<&tlmm 32 0>;
+		qcom,gpio-tbl-num = <0 1 2 3>;
+		qcom,gpio-tbl-flags = <1 1 1 1>;
+		qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+						"CCI_I2C_CLK0",
+						"CCI_I2C_DATA1",
+						"CCI_I2C_CLK1";
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			status = "disabled";
+		};
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			status = "disabled";
+		};
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			status = "disabled";
+		};
+
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			status = "disabled";
+		};
+	};
+};
+
+&i2c_freq_100Khz {
+	qcom,hw-thigh = <78>;
+	qcom,hw-tlow = <114>;
+	qcom,hw-tsu-sto = <28>;
+	qcom,hw-tsu-sta = <28>;
+	qcom,hw-thd-dat = <10>;
+	qcom,hw-thd-sta = <77>;
+	qcom,hw-tbuf = <118>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <1>;
+};
+
+&i2c_freq_400Khz {
+	qcom,hw-thigh = <20>;
+	qcom,hw-tlow = <28>;
+	qcom,hw-tsu-sto = <21>;
+	qcom,hw-tsu-sta = <21>;
+	qcom,hw-thd-dat = <13>;
+	qcom,hw-thd-sta = <18>;
+	qcom,hw-tbuf = <32>;
+	qcom,hw-scl-stretch-en = <0>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	status = "ok";
+};
+
+&i2c_freq_custom {
+	qcom,hw-thigh = <15>;
+	qcom,hw-tlow = <28>;
+	qcom,hw-tsu-sto = <21>;
+	qcom,hw-tsu-sta = <21>;
+	qcom,hw-thd-dat = <13>;
+	qcom,hw-thd-sta = <18>;
+	qcom,hw-tbuf = <25>;
+	qcom,hw-scl-stretch-en = <1>;
+	qcom,hw-trdhld = <6>;
+	qcom,hw-tsp = <3>;
+	status = "ok";
+};
+
+&i2c_freq_1Mhz {
+	qcom,hw-thigh = <16>;
+	qcom,hw-tlow = <22>;
+	qcom,hw-tsu-sto = <17>;
+	qcom,hw-tsu-sta = <18>;
+	qcom,hw-thd-dat = <16>;
+	qcom,hw-thd-sta = <15>;
+	qcom,hw-tbuf = <19>;
+	qcom,hw-scl-stretch-en = <1>;
+	qcom,hw-trdhld = <3>;
+	qcom,hw-tsp = <3>;
+	qcom,cci-clk-src = <37500000>;
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
index d6d85fa..1f19e20 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
@@ -141,6 +141,12 @@
 	qcom,panel-roi-alignment = <2 2 2 2 2 2>;
 };
 
+&dsi_icn9706_720_1440_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
 &tlmm {
 	tlmm_gpio_key {
 		gpio_key_active: gpio_key_active {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
index b9229e1..858936d 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
@@ -15,13 +15,14 @@
 	tlmm: pinctrl@1000000 {
 		compatible = "qcom,msm8917-pinctrl";
 		reg = <0x1000000 0x300000>;
-		interrupts = <0 208 0>;
+		interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>;
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-controller;
+		interrupt-parent = <&wakegpio>;
 		#interrupt-cells = <2>;
 
-
+#include "msm8917-camera-pinctrl.dtsi"
 		/* add pingrp for touchscreen */
 		pmx_ts_int_active {
 			ts_int_active: ts_int_active {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
index 575d1b5..a3b4679 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
@@ -31,7 +31,6 @@
 
 	qcom,lpm-levels {
 		compatible = "qcom,lpm-levels";
-		qcom,use-psci;
 		#address-cells = <1>;
 		#size-cells = <0>;
 
@@ -40,8 +39,6 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			label = "perf";
-			qcom,spm-device-names = "l2";
-			qcom,default-level=<0>;
 			qcom,psci-mode-shift = <4>;
 			qcom,psci-mode-mask = <0xf>;
 
@@ -98,11 +95,12 @@
 				#size-cells = <0>;
 				qcom,psci-mode-shift = <0>;
 				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
 
 				qcom,pm-cpu-level@0 {
 					reg = <0>;
-					qcom,psci-cpu-mode = <0>;
 					label = "wfi";
+					qcom,psci-cpu-mode = <1>;
 					qcom,latency-us = <12>;
 					qcom,ss-power = <463>;
 					qcom,energy-overhead = <23520>;
@@ -111,8 +109,8 @@
 
 				qcom,pm-cpu-level@1 {
 					reg = <1>;
-					qcom,psci-cpu-mode = <3>;
 					label = "pc";
+					qcom,psci-cpu-mode = <3>;
 					qcom,latency-us = <180>;
 					qcom,ss-power = <429>;
 					qcom,energy-overhead = <162991>;
@@ -125,204 +123,10 @@
 		};
 	};
 
-	qcom,mpm@601d0 {
-		compatible = "qcom,mpm-v2";
-		reg = <0x601d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
-		      <0xb011008 0x4>;
-		reg-names = "vmpm", "ipc";
-		interrupts = <0 171 1>;
-		clocks = <&clock_gcc clk_xo_lpm_clk>;
-		clock-names = "xo";
-		qcom,ipc-bit-offset = <1>;
-		qcom,gic-parent = <&intc>;
-		qcom,gic-map = <2 216>, /* tsens_upper_lower_int */
-			<49 172>, /* usb1_hs_async_wakeup_irq */
-			<58 166>, /* usb_hs_irq */
-			<53 104>, /* mdss_irq */
-			<62 222>, /* ee0_krait_hlos_spmi_periph_irq */
-			<0xff 18>,  /* APC_qgicQTmrSecPhysIrptReq */
-			<0xff 19>,  /* APC_qgicQTmrNonSecPhysIrptReq */
-			<0xff 20>,  /* qgicQTmrVirtIrptReq */
-			<0xff 35>,  /* WDT_barkInt */
-			<0xff 39>,  /* arch_mem_timer */
-			<0xff 40>,  /* qtmr_phy_irq[0] */
-			<0xff 47>,  /* rbif_irq[0] */
-			<0xff 56>,  /* q6_wdog_expired_irq */
-			<0xff 57>,  /* mss_to_apps_irq(0) */
-			<0xff 58>,  /* mss_to_apps_irq(1) */
-			<0xff 59>,  /* mss_to_apps_irq(2) */
-			<0xff 60>,  /* mss_to_apps_irq(3) */
-			<0xff 61>,  /* mss_a2_bam_irq */
-			<0xff 65>,  /* o_gc_sys_irq[0] */
-			<0xff 69>,  /* vbif_irpt */
-			<0xff 73>,  /* smmu_intr_bus[1] */
-			<0xff 74>,  /* smmu_bus_intr[2] */
-			<0xff 75>,  /* smmu_bus_intr[3] */
-			<0xff 76>,  /* venus_irq */
-			<0xff 78>,  /* smmu_bus_intr[5] */
-			<0xff 79>,  /* smmu_bus_intr[6] */
-			<0xff 85>,  /* smmu_bus_intr[31] */
-			<0xff 86>,  /* smmu_bus_intr[32] */
-			<0xff 90>,  /* smmu_bus_intr[33] */
-			<0xff 92>,  /* smmu_bus_intr[34] */
-			<0xff 93>,  /* smmu_bus_intr[35] */
-			<0xff 97>,  /* smmu_bus_intr[10] */
-			<0xff 102>, /* smmu_bus_intr[14] */
-			<0xff 108>, /* smmu_bus_intr[36] */
-			<0xff 109>, /* smmu_bus_intr[37] */
-			<0xff 112>, /* smmu_bus_intr[38] */
-			<0xff 114>, /* qdsd_intr_out */
-			<0xff 126>, /* smmu_bus_intr[39] */
-			<0xff 128>, /* blsp1_peripheral_irq[3] */
-			<0xff 129>, /* blsp1_peripheral_irq[4] */
-			<0xff 131>, /* qup_irq */
-			<0xff 136>, /* smmu_bus_intr[43] */
-			<0xff 137>, /* smmu_intr_bus[44] */
-			<0xff 138>, /* smmu_intr_bus[45] */
-			<0xff 140>, /* uart_dm_intr */
-			<0xff 141>, /* smmu_bus_intr[46] */
-			<0xff 142>, /* smmu_bus_intr[47] */
-			<0xff 143>, /* smmu_bus_intr[48] */
-			<0xff 144>, /* smmu_bus_intr[49] */
-			<0xff 145>, /* smmu_bus_intr[50] */
-			<0xff 146>, /* smmu_bus_intr[51] */
-			<0xff 147>, /* smmu_bus_intr[52] */
-			<0xff 148>, /* smmu_bus_intr[53] */
-			<0xff 149>, /* smmu_bus_intr[54] */
-			<0xff 150>, /* smmu_bus_intr[55] */
-			<0xff 151>, /* smmu_bus_intr[56] */
-			<0xff 152>, /* smmu_bus_intr[57] */
-			<0xff 153>, /* smmu_bus_intr[58] */
-			<0xff 155>, /* sdc1_irq(0) */
-			<0xff 157>, /* sdc2_irq(0) */
-			<0xff 167>, /* bam_irq(0) */
-			<0xff 170>, /* sdc1_pwr_cmd_irq */
-			<0xff 173>, /* o_wcss_apss_smd_hi */
-			<0xff 174>, /* o_wcss_apss_smd_med */
-			<0xff 175>, /* o_wcss_apss_smd_low */
-			<0xff 176>, /* o_wcss_apss_smsm_irq */
-			<0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
-			<0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
-			<0xff 179>, /* o_wcss_apss_asic_intr */
-			<0xff 181>, /* o_wcss_apss_wdog_bite_and_reset_rdy */
-			<0xff 188>, /* lpass_irq_out_apcs(0) */
-			<0xff 189>, /* lpass_irq_out_apcs(1) */
-			<0xff 190>, /* lpass_irq_out_apcs(2) */
-			<0xff 191>, /* lpass_irq_out_apcs(3) */
-			<0xff 192>, /* lpass_irq_out_apcs(4) */
-			<0xff 193>, /* lpass_irq_out_apcs(5) */
-			<0xff 194>, /* lpass_irq_out_apcs(6) */
-			<0xff 195>, /* lpass_irq_out_apcs(7) */
-			<0xff 196>, /* lpass_irq_out_apcs(8) */
-			<0xff 197>, /* lpass_irq_out_apcs(9) */
-			<0xff 198>, /* coresight-tmc-etr interrupt */
-			<0xff 200>, /* rpm_ipc(4) */
-			<0xff 201>, /* rpm_ipc(5) */
-			<0xff 202>, /* rpm_ipc(6) */
-			<0xff 203>, /* rpm_ipc(7) */
-			<0xff 204>, /* rpm_ipc(24) */
-			<0xff 205>, /* rpm_ipc(25) */
-			<0xff 206>, /* rpm_ipc(26) */
-			<0xff 207>, /* rpm_ipc(27) */
-			<0xff 215>, /* o_bimc_intr[0] */
-			<0xff 224>, /* SPDM interrupt */
-			<0xff 239>, /* crypto_bam_irq[1]*/
-			<0xff 240>, /* summary_irq_kpss */
-			<0xff 253>, /* sdcc_pwr_cmd_irq */
-			<0xff 260>, /* ipa_irq[0] */
-			<0xff 261>, /* ipa_irq[2] */
-			<0xff 262>, /* ipa_bam_irq[0] */
-			<0xff 263>, /* ipa_bam_irq[2] */
-			<0xff 269>, /* rpm_wdog_expired_irq */
-			<0xff 270>, /* blsp1_bam_irq[0] */
-			<0xff 272>, /* smmu_intr_bus[17] */
-			<0xff 273>, /* smmu_bus_intr[18] */
-			<0xff 274>, /* smmu_bus_intr[19] */
-			<0xff 275>, /* rpm_ipc(30) */
-			<0xff 276>, /* rpm_ipc(31) */
-			<0xff 277>, /* smmu_intr_bus[20] */
-			<0xff 285>, /* smmu_bus_intr[28] */
-			<0xff 286>, /* smmu_bus_intr[29] */
-			<0xff 287>, /* smmu_bus_intr[30] */
-			<0xff 321>, /* q6ss_irq_out(4) */
-			<0xff 322>, /* q6ss_irq_out(5) */
-			<0xff 323>, /* q6ss_irq_out(6) */
-			<0xff 325>, /* q6ss_wdog_exp_irq */
-			<0xff 344>; /* sdcc1ice */
-
-		qcom,gpio-parent = <&tlmm>;
-		qcom,gpio-map = <3  38 >,
-			<4  1 >,
-			<5  5 >,
-			<6  9 >,
-			<8  37>,
-			<9  36>,
-			<10  13>,
-			<11  35>,
-			<12  17>,
-			<13  21>,
-			<14  54>,
-			<15  34>,
-			<16  31>,
-			<17  58>,
-			<18  28>,
-			<19  42>,
-			<20  25>,
-			<21  12>,
-			<22  43>,
-			<23  44>,
-			<24  45>,
-			<25  46>,
-			<26  48>,
-			<27  65>,
-			<28  93>,
-			<29  97>,
-			<30  63>,
-			<31  70>,
-			<32  71>,
-			<33  72>,
-			<34  81>,
-			<35  126>,
-			<36  90>,
-			<37  128>,
-			<38  91>,
-			<39  41>,
-			<40  127>,
-			<41  86>,
-			<50  67>,
-			<51  73>,
-			<52  74>,
-			<53  62>,
-			<54  124>,
-			<55  61>,
-			<56  130>,
-			<57  59>,
-			<59  50>;
-	};
-
-	qcom,cpu-sleep-status {
-		compatible = "qcom,cpu-sleep-status";
-	};
-
-	qcom,rpm-log@29dc00 {
-		compatible = "qcom,rpm-log";
-		reg = <0x29dc00 0x4000>;
-		qcom,rpm-addr-phys = <0x200000>;
-		qcom,offset-version = <4>;
-		qcom,offset-page-buffer-addr = <36>;
-		qcom,offset-log-len = <40>;
-		qcom,offset-log-len-mask = <44>;
-		qcom,offset-page-indices = <56>;
-	};
-
 	qcom,rpm-stats@29dba0 {
 		compatible = "qcom,rpm-stats";
-		reg = <0x200000 0x1000>,
-		      <0x290014 0x4>,
-		      <0x29001c 0x4>;
-		reg-names = "phys_addr_base", "offset_addr",
-						"heap_phys_addrbase";
-		qcom,sleep-stats-version = <2>;
+		reg = <0x200000 0x1000>, <0x290014 0x4>;
+		reg-names = "phys_addr_base", "offset_addr";
 	};
 
 	qcom,rpm-master-stats@60150 {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts
index 29ef47c..832a0ab 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8937-qrd-sku5.dts
@@ -114,4 +114,13 @@
 		qcom,key-codes = <139 172 158>;
 		qcom,y-offset = <0>;
 	};
+
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8937_flash0>;
+		qcom,torch-source = <&pmi8937_torch0>;
+		qcom,switch-source = <&pmi8937_switch>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi
index 3b24ab7..91a290f 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8937.dtsi
@@ -16,7 +16,7 @@
 &qpnp_smbcharger {
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi
index 528bde4..90226da 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8940.dtsi
@@ -16,7 +16,7 @@
 &qpnp_smbcharger {
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts
index ea6b24a..4a38b6f 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-cdp-mirror-lake-touch.dts
@@ -14,6 +14,8 @@
 /dts-v1/;
 
 #include "msm8917.dtsi"
+#include "msm8917-cdp.dtsi"
+#include "msm8917-pmi8950.dtsi"
 #include "msm8917-pmi8950-cdp-mirror-lake-touch.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi
index 4da1384..f882917 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950-mtp.dtsi
@@ -59,7 +59,7 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi
index 9543133..e86b9d7 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pmi8950.dtsi
@@ -16,7 +16,7 @@
 &qpnp_smbcharger {
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
index fae258d0..431a5e5 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
@@ -12,6 +12,7 @@
  */
 
 #include "msm8917-pinctrl.dtsi"
+#include "msm8917-camera-sensor-qrd.dtsi"
 
 &blsp1_uart2 {
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index a285110..1d5fe79 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -21,7 +21,7 @@
 	model = "Qualcomm Technologies, Inc. MSM8917";
 	compatible = "qcom,msm8917";
 	qcom,msm-id = <303 0x0>, <308 0x0>, <309 0x0>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&wakegic>;
 
 	chosen {
 		bootargs = "sched_enable_hmp=1";
@@ -157,6 +157,7 @@
 };
 
 #include "msm8917-pinctrl.dtsi"
+#include "msm8917-camera.dtsi"
 #include "msm8917-cpu.dtsi"
 #include "msm8917-pm.dtsi"
 #include "msm8917-ion.dtsi"
@@ -197,19 +198,19 @@
 	};
 
 	wakegic: wake-gic {
-		compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8937";
+		compatible = "qcom,mpm-gic-msm8937", "qcom,mpm-gic";
 		interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
 		reg = <0x601d0 0x1000>,
 			<0xb011008 0x4>;  /* MSM_APCS_GCC_BASE 4K */
 		reg-names = "vmpm", "ipc";
-		qcom,num-mpm-irqs = <96>;
+		qcom,num-mpm-irqs = <64>;
 		interrupt-controller;
 		interrupt-parent = <&intc>;
 		#interrupt-cells = <3>;
 	};
 
 	wakegpio: wake-gpio {
-		compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8937";
+		compatible = "qcom,mpm-gpio-msm8937", "qcom,mpm-gpio";
 		interrupt-controller;
 		interrupt-parent = <&intc>;
 		#interrupt-cells = <2>;
@@ -1639,6 +1640,10 @@
 		qcom,has-vsys-adc-channel;
 		qcom,wcnss-adc_tm = <&pm8937_adc_tm>;
 	};
+
+	ssc_sensors: qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+	};
 };
 
 #include "pm8937-rpm-regulator.dtsi"
@@ -1711,3 +1716,141 @@
 	qcom,clk-dis-wait-val = <0x5>;
 	status = "okay";
 };
+
+/* GPU overrides */
+&msm_gpu {
+
+	qcom,gpu-speed-bin = <0x6018 0x80000000 31>;
+	/delete-node/qcom,gpu-pwrlevels;
+
+	qcom,gpu-pwrlevel-bins {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible="qcom,gpu-pwrlevel-bins";
+
+		qcom,gpu-pwrlevels-0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <0>;
+			qcom,initial-pwrlevel = <3>;
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <598000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <7>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <523200000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <7>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <484800000>;
+				qcom,bus-freq = <5>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <6>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <270000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <3>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		qcom,gpu-pwrlevels-1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <1>;
+			qcom,initial-pwrlevel = <3>;
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <8>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <523200000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <7>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <484800000>;
+				qcom,bus-freq = <5>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <6>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <270000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <3>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
index b952908..e64af14 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
@@ -462,6 +462,8 @@
 
 		reg = <0x619c000 0x1000>;
 		cpu = <&CPU4>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm4";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -481,6 +483,8 @@
 
 		reg = <0x619d000 0x1000>;
 		cpu = <&CPU5>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm5";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -500,6 +504,8 @@
 
 		reg = <0x619e000 0x1000>;
 		cpu = <&CPU6>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm6";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -519,6 +525,8 @@
 
 		reg = <0x619f000 0x1000>;
 		cpu = <&CPU7>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm7";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -538,6 +546,8 @@
 
 		reg = <0x61bc000 0x1000>;
 		cpu = <&CPU0>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm0";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -557,6 +567,8 @@
 
 		reg = <0x61bd000 0x1000>;
 		cpu = <&CPU1>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm1";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -576,6 +588,8 @@
 
 		reg = <0x61be000 0x1000>;
 		cpu = <&CPU2>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm2";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
@@ -594,6 +608,8 @@
 		arm,primecell-periphid = <0x000bb95d>;
 
 		reg = <0x61bf000 0x1000>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm3";
 		cpu = <&CPU3>;
 
@@ -957,26 +973,13 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_modem_cpu0: cti@6128000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6128000 0x1000>;
-		reg-names = "cti-base";
-		coresight-name = "coresight-cti-modem-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_modem_cpu1: cti@6124000{
+	cti_modem_cpu0: cti@6124000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
 		reg = <0x6124000 0x1000>;
 		reg-names = "cti-base";
-		coresight-name = "coresight-cti-modem-cpu1";
+		coresight-name = "coresight-cti-modem-cpu0";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts b/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts
index 71157e2..2bad28b 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm439-qrd.dts
@@ -22,10 +22,3 @@
 	qcom,board-id = <0xb 2>;
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
-
-&pmi632_vadc {
-	chan@4a {
-		qcom,scale-function = <22>;
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
index ef4f4b0..528e8aa 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
@@ -25,6 +25,8 @@
 #include "dsi-panel-hx8399c-hd-plus-video.dtsi"
 #include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
 #include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
+#include "dsi-panel-icn9706-720-1440p-video.dtsi"
+
 &soc {
 	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
 		#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
index 57823b8..90685e9 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
@@ -156,6 +156,39 @@
 	};
 };
 
+&pm8937_gpios {
+	nfc_clk {
+		nfc_clk_default: nfc_clk_default {
+			pins = "gpio5";
+			function = "normal";
+			input-enable;
+			power-source = <1>;
+		};
+	};
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 17 0x00>;
+		qcom,nq-ven = <&tlmm 16 0x00>;
+		qcom,nq-firm = <&tlmm 130 0x00>;
+		qcom,nq-clkreq = <&pm8937_gpios 5 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK2";
+		interrupts = <17 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active
+						&nfc_clk_default>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		clocks = <&clock_gcc clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
+};
+
 &thermal_zones {
 	quiet-therm-step {
 		status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
index 86e41a3..a730287 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
@@ -319,6 +319,88 @@
 			};
 		};
 
+		spi7 {
+			spi7_default: spi7_default {
+				mux {
+					pins = "gpio85", "gpio86", "gpio88";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio88";
+					drive-strength = <16>;
+					bias-disable = <0>;
+				};
+			};
+
+			spi7_sleep: spi7_sleep {
+				mux {
+					pins = "gpio85", "gpio86", "gpio88";
+					function = "blsp_spi7";
+				};
+
+				config {
+					pins = "gpio85", "gpio86", "gpio88";
+					drive-strength = <16>;
+					bias-disable = <0>;
+				};
+			};
+			spi7_cs0_active: cs0_active {
+				mux {
+					pins = "gpio87";
+					function = "blsp_spi7_cs0";
+				};
+
+				config {
+					pins = "gpio87";
+					drive-strength = <2>;
+					bias-disable = <0>;
+				};
+			};
+		};
+
+		fpc_reset_int {
+			fpc_reset_low: reset_low {
+				mux {
+					pins = "gpio124";
+					function = "fpc_reset_gpio_low";
+				};
+
+				config {
+					pins = "gpio124";
+					drive-strength = <2>;
+					bias-disable;
+					output-low;
+				};
+			};
+
+			fpc_reset_high: reset_high {
+				mux {
+					pins = "gpio124";
+					function = "fpc_reset_gpio_high";
+				};
+
+				config {
+					pins = "gpio124";
+					drive-strength = <2>;
+					bias-disable;
+					output-high;
+				};
+			};
+
+			fpc_int_low: int_low {
+				mux {
+					pins = "gpio48";
+				};
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-pull-down;
+					input-enable;
+				};
+			};
+		};
+
 		wcnss_pmux_5wire {
 			/* Active configuration of bus pins */
 			wcnss_default: wcnss_default {
diff --git a/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi
index 1afa230..2d88ad1 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-pmi8950-mtp.dtsi
@@ -30,7 +30,7 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-	/delete-property/ dpdm-supply;
+	dpdm-supply = <&usb_otg>;
 };
 
 &usb_otg {
diff --git a/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
index 44bdfc9..d6f24d9 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
@@ -62,7 +62,7 @@
 		pm8937_cx_cdev: regulator-cx-cdev {
 			compatible = "qcom,regulator-cooling-device";
 			regulator-cdev-supply = <&pm8937_s2_floor_level>;
-			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM_PLUS
 					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
 			#cooling-cells = <2>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/msm8937.dtsi b/arch/arm64/boot/dts/qcom/msm8937.dtsi
index 66c7e7c..85e3043 100644
--- a/arch/arm64/boot/dts/qcom/msm8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937.dtsi
@@ -132,7 +132,7 @@
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
-			size = <0 0x2400000>;
+			size = <0x400000>;
 		};
 	};
 
@@ -998,7 +998,7 @@
 		reg-names = "wdt-base";
 		interrupts = <0 3 0>, <0 4 0>;
 		qcom,bark-time = <11000>;
-		qcom,pet-time = <10000>;
+		qcom,pet-time = <9360>;
 		qcom,ipi-ping;
 		qcom,wakeup-enable;
 		status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
index 6e961b1..d47dd75 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
@@ -207,8 +207,8 @@
 		cam_v_custom1-supply = <&pm8953_l23>;
 		qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
 						"cam_vana", "cam_v_custom1";
-		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1220000>;
-		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1220000>;
+		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1200000>;
+		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1200000>;
 		qcom,cam-vreg-op-mode = <0 105000 100000 80000 105000>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_default
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
index 6e961b1..d47dd75 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
@@ -207,8 +207,8 @@
 		cam_v_custom1-supply = <&pm8953_l23>;
 		qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
 						"cam_vana", "cam_v_custom1";
-		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1220000>;
-		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1220000>;
+		qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1200000>;
+		qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1200000>;
 		qcom,cam-vreg-op-mode = <0 105000 100000 80000 105000>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_default
diff --git a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
index 196a526..d3c2e26 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
@@ -505,6 +505,8 @@
 
 		reg = <0x619c000 0x1000>;
 		cpu = <&CPU0>;
+
+		qcom,tupwr-disable;
 		coresight-name = "coresight-etm0";
 
 		clocks = <&clock_gcc clk_qdss_clk>,
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
index b182a25..1b09b3c 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
@@ -23,6 +23,31 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&pmi_haptic{
+	qcom,lra-auto-res-mode="qwd";
+	qcom,lra-high-z="opt1";
+	qcom,lra-res-cal-period = <0>;
+	qcom,wave-play-rate-us = <4165>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+	qcom,chg-led-sw-controls;
+	qcom,chg-led-support;
+};
+
 &int_codec {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
index b80583e..e3a5b4a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -25,3 +25,75 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
+&int_codec {
+	status = "disabled";
+};
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+	status = "disabled";
+};
+
+&wsa881x_analog_vi_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_clk_gpio {
+	status = "disabled";
+};
+
+&wsa881x_analog_reset_gpio {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&cdc_us_euro_sw {
+	status = "okay";
+};
+
+&cdc_quin_mi2s_gpios {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	qcom,model = "msm8953-tasha-snd-card";
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
index f82b68d..14c4d2b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
@@ -231,8 +231,8 @@
 			qcom,gpu-pwrlevel@5 {
 				reg = <5>;
 				qcom,gpu-freq = <216000000>;
-				qcom,bus-freq = <1>;
-				qcom,bus-min = <1>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <2>;
 				qcom,bus-max = <4>;
 			};
 
@@ -240,7 +240,7 @@
 			qcom,gpu-pwrlevel@6 {
 				reg = <6>;
 				qcom,gpu-freq = <133300000>;
-				qcom,bus-freq = <1>;
+				qcom,bus-freq = <3>;
 				qcom,bus-min = <1>;
 				qcom,bus-max = <4>;
 			};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
index f7671dc..a80b4fe 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
@@ -25,6 +25,7 @@
 #include "dsi-panel-lt8912-1080p-video.dtsi"
 #include "dsi-panel-hx8399c-fhd-plus-video.dtsi"
 #include "dsi-panel-hx83100a-800p-video.dtsi"
+#include "dsi-panel-boent51021-1200p-video.dtsi"
 
 &soc {
 	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -75,7 +76,13 @@
 		23 1e 08 09 05 03 04 a0
 		23 1a 08 09 05 03 04 a0];
 	qcom,esd-check-enabled;
-	qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x1c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x1c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
 
 };
 
@@ -86,7 +93,13 @@
 		23 1e 08 09 05 03 04 a0
 		23 1a 08 09 05 03 04 a0];
 	qcom,esd-check-enabled;
-	qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x1c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x1c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
 };
 
 &dsi_r69006_1080p_video {
@@ -157,3 +170,11 @@
 		1f 1c 05 06 03 03 04 a0
 		1f 10 05 06 03 03 04 a0];
 };
+
+&dsi_boent51021_1200p_video {
+	qcom,mdss-dsi-panel-timings-phy-v2 = [25 20 08 0a 06 03 04 a0
+		25 20 08 0a 06 03 04 a0
+		25 20 08 0a 06 03 04 a0
+		25 20 08 0a 06 03 04 a0
+		25 1d 08 0a 06 03 04 a0];
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
index c6ae512..00614b2 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
@@ -21,7 +21,7 @@
 	qcom,board-id = <8 0>;
 };
 
-/{
+&vendor {
 	mtp_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "batterydata-itech-3000mah.dtsi"
@@ -33,6 +33,13 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&pmi_haptic{
+	qcom,lra-auto-res-mode="qwd";
+	qcom,lra-high-z="opt1";
+	qcom,lra-res-cal-period = <0>;
+	qcom,wave-play-rate-us = <4165>;
+};
+
 &qpnp_smbcharger {
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
index 97c6db3..539ac59 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -37,6 +37,13 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&pmi_haptic{
+	qcom,lra-auto-res-mode="qwd";
+	qcom,lra-high-z="opt1";
+	qcom,lra-res-cal-period = <0>;
+	qcom,wave-play-rate-us = <4165>;
+};
+
 &qpnp_smbcharger {
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
index cc3c392..e8de4ef 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -1731,5 +1731,19 @@
 				bias-disable;
 			};
 		};
+
+		ssusb_mode_sel: ssusb_mode_sel {
+			mux {
+				pins = "gpio12";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio12";
+				drive-strength = <2>;
+				bias-disable;
+				input-disable;
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi b/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
index cb8cdf2..1558010 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
@@ -88,8 +88,8 @@
 				<&apps_iommu 0x82c 0x01>,
 				<&apps_iommu 0x821 0x10>;
 			buffer-types = <0xfff>;
-			virtual-addr-pool = <0x5dc00000 0x7f000000
-				0xdcc00000 0x1000000>;
+			virtual-addr-pool = <0x79000000 0x28000000
+				0xa1000000 0xc9000000>;
 		};
 
 		secure_bitstream_cb {
@@ -102,7 +102,7 @@
 				<&apps_iommu 0x926 0x0>,
 				<&apps_iommu 0x929 0x2>;
 			buffer-types = <0x241>;
-			virtual-addr-pool = <0x4b000000 0x12c00000>;
+			virtual-addr-pool = <0x51000000 0x28000000>;
 			qcom,secure-context-bank;
 		};
 
@@ -113,7 +113,7 @@
 				<&apps_iommu 0x910 0x0>,
 				<&apps_iommu 0x92c 0x0>;
 			buffer-types = <0x106>;
-			virtual-addr-pool = <0x25800000 0x25800000>;
+			virtual-addr-pool = <0x29000000 0x28000000>;
 			qcom,secure-context-bank;
 		};
 
@@ -125,7 +125,7 @@
 				<&apps_iommu 0x925 0x8>,
 				<&apps_iommu 0x928 0x0>;
 			buffer-types = <0x480>;
-			virtual-addr-pool = <0x1000000 0x24800000>;
+			virtual-addr-pool = <0x1000000 0x28000000>;
 			qcom,secure-context-bank;
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 67fd75f..c3178e0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -30,9 +30,21 @@
 		bootargs = "core_ctl_disable_cpumask=0-7 kpti=0";
 	};
 
+	vendor: vendor {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+	};
+
 	firmware: firmware {
 		android {
 			compatible = "android,firmware";
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo,recovery";
+			};
+
 			fstab {
 				compatible = "android,fstab";
 				vendor {
@@ -40,7 +52,7 @@
 					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait";
+					fsmgr_flags = "wait,avb";
 					status = "ok";
 				};
 				system {
@@ -48,7 +60,7 @@
 					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/system";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait";
+					fsmgr_flags = "wait,avb";
 					status = "ok";
 				};
 
@@ -97,7 +109,7 @@
 			compatible = "shared-dma-pool";
 			reusable;
 			alignment = <0 0x400000>;
-			size = <0 0x09800000>;
+			size = <0 0x0b400000>;
 		};
 
 		qseecom_mem: qseecom_region@0 {
@@ -143,7 +155,7 @@
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
-			size = <0 0x2400000>;
+			size = <0 0x400000>;
 		};
 	};
 
@@ -1230,7 +1242,7 @@
 		reg-names = "wdt-base";
 		interrupts = <0 3 0>, <0 4 0>;
 		qcom,bark-time = <11000>;
-		qcom,pet-time = <10000>;
+		qcom,pet-time = <9360>;
 		qcom,ipi-ping;
 		qcom,wakeup-enable;
 		qcom,scandump-size = <0x40000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 338f82a..2c93de7 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -326,8 +326,8 @@
 		blsp2_spi5: spi@075ba000{
 			compatible = "qcom,spi-qup-v2.2.1";
 			reg = <0x075ba000 0x600>;
-			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
 				 <&gcc GCC_BLSP2_AHB_CLK>;
 			clock-names = "core", "iface";
 			pinctrl-names = "default", "sleep";
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index af29080..5204137 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -101,12 +101,13 @@
 			reg = <0x3100 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <0x0 0x31 0x0 IRQ_TYPE_NONE>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1800>;
 			qcom,vadc-poll-eoc;
 			qcom,pmic-revid = <&pm8916_revid>;
+			#thermal-sensor-cells = <1>;
 
 			chan@8 {
 				label = "die_temp";
@@ -145,11 +146,12 @@
 		pm8916_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
-			interrupts = <0x0 0x24 0x0 IRQ_TYPE_NONE>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pm8916_tz";
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
 			qcom,temp_alarm-vadc = <&pm8916_vadc>;
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8916_adc_tm: vadc@3400 {
@@ -157,9 +159,9 @@
 			reg = <0x3400 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <0x0 0x34 0x0 IRQ_TYPE_NONE>,
-				     <0x0 0x34 0x3 IRQ_TYPE_NONE>,
-				     <0x0 0x34 0x4 IRQ_TYPE_NONE>;
+			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x34 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x34 0x4 IRQ_TYPE_EDGE_RISING>;
 			interrupt-names = "eoc-int-en-set",
 					  "high-thr-en-set",
 					  "low-thr-en-set";
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 0b1a50a..53147eb 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -289,20 +289,20 @@
 		pmi632_gpios: pinctrl@c000 {
 			compatible = "qcom,spmi-gpio";
 			reg = <0xc000 0x800>;
-			interrupts = <0x2 0xc1 0 IRQ_TYPE_NONE>,
+			interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+					<0x2 0xc1 0 IRQ_TYPE_NONE>,
 					<0x2 0xc2 0 IRQ_TYPE_NONE>,
 					<0x2 0xc3 0 IRQ_TYPE_NONE>,
 					<0x2 0xc4 0 IRQ_TYPE_NONE>,
 					<0x2 0xc5 0 IRQ_TYPE_NONE>,
 					<0x2 0xc6 0 IRQ_TYPE_NONE>,
 					<0x2 0xc7 0 IRQ_TYPE_NONE>;
-			interrupt-names = "pmi632_gpio2", "pmi632_gpio3",
-					"pmi632_gpio4", "pmi632_gpio5",
-					"pmi632_gpio6", "pmi632_gpio7",
-					"pmi632_gpio8";
+			interrupt-names = "pmi632_gpio1", "pmi632_gpio2",
+					"pmi632_gpio3", "pmi632_gpio4",
+					"pmi632_gpio5", "pmi632_gpio6",
+					"pmi632_gpio7", "pmi632_gpio8";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <1>;
 		};
 
 		pmi632_charger: qcom,qpnp-smb5 {
@@ -315,6 +315,12 @@
 			dpdm-supply = <&qusb_phy>;
 			qcom,auto-recharge-soc = <98>;
 			qcom,chg-vadc = <&pmi632_vadc>;
+			qcom,flash-disable-soc = <10>;
+			qcom,sw-jeita-enable;
+			qcom,step-charging-enable;
+			qcom,hw-die-temp-mitigation;
+			qcom,hw-connector-mitigation;
+			qcom,connector-internal-pull-kohm = <100>;
 
 			qcom,thermal-mitigation
 				= <3000000 2500000 2000000 1500000
@@ -464,7 +470,6 @@
 						"ilim1-s1",
 						"ilim2-s2",
 						"vreg-ok";
-				qcom,flash-disable-soc = <10>;
 			};
 
 			smb5_vbus: qcom,smb5-vbus {
@@ -525,6 +530,16 @@
 			compatible = "qcom,msm-bcl-soc";
 			#thermal-sensor-cells = <0>;
 		};
+
+		pmi632_pbs_client3: qcom,pbs@7400 {
+			compatible = "qcom,qpnp-pbs";
+			reg = <0x7400 0x100>;
+		};
+
+		pmi632_sdam7: qcom,sdam@b600 {
+			compatible = "qcom,spmi-sdam";
+			reg = <0xb600 0x100>;
+		};
 	};
 
 	pmi632_3: qcom,pmi632@3 {
@@ -537,7 +552,7 @@
 			compatible = "qcom,qpnp-vibrator-ldo";
 			reg = <0x5700 0x100>;
 			qcom,vib-ldo-volt-uv = <3000000>;
-			qcom,vib-overdrive-volt-uv = <3544000>;
+			qcom,disable-overdrive;
 		};
 
 		pmi632_pwm: qcom,pwms@b300 {
@@ -545,6 +560,36 @@
 			reg = <0xb300 0x500>;
 			reg-names = "lpg-base";
 			#pwm-cells = <2>;
+			nvmem-names = "ppg_sdam";
+			nvmem = <&pmi632_sdam7>;
+			qcom,pbs-client = <&pmi632_pbs_client3>;
+			qcom,lut-sdam-base = <0x80>;
+			qcom,lut-patterns = <0 0 0 14 28 42 56 70 84 100
+						100 84 70 56 42 28 14 0 0 0>;
+			lpg@1 {
+				qcom,lpg-chan-id = <1>;
+				qcom,ramp-step-ms = <200>;
+				qcom,ramp-low-index = <0>;
+				qcom,ramp-high-index = <19>;
+				qcom,ramp-pattern-repeat;
+				qcom,lpg-sdam-base = <0x48>;
+			};
+			lpg@2 {
+				qcom,lpg-chan-id = <2>;
+				qcom,ramp-step-ms = <200>;
+				qcom,ramp-low-index = <0>;
+				qcom,ramp-high-index = <19>;
+				qcom,ramp-pattern-repeat;
+				qcom,lpg-sdam-base = <0x56>;
+			};
+			lpg@3 {
+				qcom,lpg-chan-id = <3>;
+				qcom,ramp-step-ms = <200>;
+				qcom,ramp-low-index = <0>;
+				qcom,ramp-high-index = <19>;
+				qcom,ramp-pattern-repeat;
+				qcom,lpg-sdam-base = <0x64>;
+			};
 		};
 
 		pmi632_rgb: qcom,leds@d000 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 8797ea8..fa93918 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -587,13 +587,15 @@
 			};
 		};
 
-		pmi_haptic: qcom,haptic@c000 {
+		pmi_haptic: qcom,haptics@c000 {
 			compatible = "qcom,qpnp-haptics";
 			reg = <0xc000 0x100>;
 			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
 					<0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
 			interrupt-names = "hap-sc-irq", "hap-play-irq";
 			qcom,pmic-revid = <&pmi8950_revid>;
+			vcc_pon-supply = <&pon_perph_reg>;
+			qcom,int-pwm-freq-khz = <505>;
 			qcom,play-mode = "direct";
 			qcom,wave-play-rate-us = <5263>;
 			qcom,actuator-type = <0>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-base.dts b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-base.dts
new file mode 100644
index 0000000..e08acea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-base.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "qcs605-lc.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 LC CDP Base SoC";
+	compatible = "qcom,qcs605";
+	qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-overlay.dts
new file mode 100644
index 0000000..5987fb3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp-overlay.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "qcs605-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 LC Groot + PM8005 CDP";
+	compatible = "qcom,qcs605-cdp", "qcom,qcs605", "qcom,cdp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dts b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dts
new file mode 100644
index 0000000..753b496
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605-lc.dtsi"
+#include "qcs605-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 LC Groot + PM8005 CDP";
+	compatible = "qcom,qcs605-cdp", "qcom,qcs605", "qcom,cdp";
+	qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dtsi
new file mode 100644
index 0000000..e32128c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-cdp.dtsi
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc-pmic-overlay.dtsi"
+
+&qupv3_se9_2uart {
+	status = "disabled";
+};
+
+&qupv3_se12_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "disabled";
+};
+
+&sdhc_1 {
+	vdd-supply = <&pm660_l19>;
+	qcom,vdd-voltage-level = <2960000 2960000>;
+	qcom,vdd-current-level = <0 570000>;
+
+	vdd-io-supply = <&pm660_l8>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <0 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	status = "ok";
+};
+
+&tlmm {
+	sdc2_cd_on: cd_on {
+		mux {
+			pins = "gpio116";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio116";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+
+	sdc2_cd_off: cd_off {
+		mux {
+			pins = "gpio116";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio116";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+};
+
+&sdhc_2 {
+	/* VDD external regulator is enabled/disabled by pm660_l18 regulator */
+	vdd-io-supply = <&pm660_l18>;
+	qcom,vdd-io-voltage-level = <1800000 2960000>;
+	qcom,vdd-io-current-level = <0 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	cd-gpios = <&tlmm 116 0x1>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 747593f..61a812c 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -90,8 +90,8 @@
 	lmh-dcvs-00 {
 		trips {
 			active-config {
-				temperature = <105000>;
-				hysteresis = <40000>;
+				temperature = <100000>;
+				hysteresis = <35000>;
 			};
 		};
 	};
@@ -99,8 +99,8 @@
 	lmh-dcvs-01 {
 		trips {
 			active-config {
-				temperature = <105000>;
-				hysteresis = <40000>;
+				temperature = <100000>;
+				hysteresis = <35000>;
 			};
 		};
 	};
@@ -108,4 +108,276 @@
 
 &msm_gpu {
 	/delete-node/qcom,gpu-mempools;
+	/delete-node/qcom,gpu-pwrlevel-bins;
+
+	qcom,gpu-pwrlevel-bins {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevel-bins";
+
+		qcom,gpu-pwrlevels-0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <0>;
+
+			qcom,initial-pwrlevel = <4>;
+			qcom,ca-target-pwrlevel = <5>;
+
+			/* TURBO_L1 */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <780000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <11>;
+			};
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <750000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM_L1 */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <565000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS_L1 */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <430000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <355000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* LOW SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <267000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* MIN SVS */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <180000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@8 {
+				reg = <8>;
+				qcom,gpu-freq = <0>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		qcom,gpu-pwrlevels-1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <146>;
+
+			qcom,initial-pwrlevel = <3>;
+			qcom,ca-target-pwrlevel = <4>;
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <700000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM_L1 */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <565000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS_L1 */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <430000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <355000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* LOW SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <267000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* MIN SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <180000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <0>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		qcom,gpu-pwrlevels-2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <163>;
+
+			qcom,initial-pwrlevel = <4>;
+			qcom,ca-target-pwrlevel = <5>;
+
+			/* TURBO_L1 */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <780000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <11>;
+			};
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <750000000>;
+				qcom,bus-freq = <11>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM_L1 */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <11>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <565000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS_L1 */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <430000000>;
+				qcom,bus-freq = <8>;
+				qcom,bus-min = <7>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <355000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* LOW SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <267000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* MIN SVS */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <180000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@8 {
+				reg = <8>;
+				qcom,gpu-freq = <0>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
index c907977..f20c2ba 100644
--- a/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sda450.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA450 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sda670-camera-sensor-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda670-camera-sensor-hdk.dtsi
new file mode 100644
index 0000000..484ed64
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-camera-sensor-hdk.dtsi
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	qcom,camera-flash@1 {
+		cell-index = <1>;
+		reg = <0x01 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@2 {
+		cell-index = <2>;
+		reg = <0x02 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash2>;
+		torch-source = <&pm660l_torch2>;
+		switch-source = <&pm660l_switch1>;
+		status = "ok";
+	};
+
+	gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+	};
+
+	camera_ldo: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "camera_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 3 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_rear_ldo: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "camera_rear_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 4 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_rear_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_vio_ldo: gpio-regulator@3 {
+		compatible = "regulator-fixed";
+		reg = <0x03 0x00>;
+		regulator-name = "camera_vio_ldo";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 29 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vio>;
+		vin-supply = <&pm660_s4>;
+	};
+
+	camera_vana_ldo: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "camera_vana_ldo";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vana>;
+		vin-supply = <&pm660l_bob>;
+	};
+};
+
+&cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
+	qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	qcom,actuator@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	ois_rear: qcom,ois@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,ois";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+		status = "disabled";
+	};
+
+	qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear_aux>;
+		actuator-src = <&actuator_rear_aux>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		led-flash-src = <&led_flash_front>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts
new file mode 100644
index 0000000..a7299a4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sda670-hdk.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 HDK";
+	compatible = "qcom,sda670-hdk", "qcom,sda670", "qcom,hdk";
+	qcom,board-id = <0x01001F 0x00>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dts b/arch/arm64/boot/dts/qcom/sda670-hdk.dts
new file mode 100644
index 0000000..ed9eec9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sda670-hdk.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 HDK";
+	compatible = "qcom,sda670-hdk", "qcom,sda670", "qcom,hdk";
+	qcom,board-id = <0x01001F 0x00>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
new file mode 100644
index 0000000..eec80f6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
@@ -0,0 +1,63 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-qrd.dtsi"
+#include "sdm670-external-codec.dtsi"
+#include "sda670-camera-sensor-hdk.dtsi"
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&qrd_batterydata {
+	#include "fg-gen3-batterydata-mlp466076-3250mah.dtsi"
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qmp-v3";
+
+	vdda-phy-supply = <&pm660l_l1>; /* 0.88v */
+	vdda-pll-supply = <&pm660_l1>; /* 1.2v */
+	vdda-phy-max-microamp = <62900>;
+	vdda-pll-max-microamp = <18300>;
+
+	status = "ok";
+};
+
+&ufshc_mem {
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pm660l_l4>;
+	vcc-voltage-level = <2960000 2960000>;
+	vccq2-supply = <&pm660_l8>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,vddp-ref-clk-supply = <&pm660_l1>;
+	qcom,vddp-ref-clk-max-microamp = <100>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
index c76ef2b..19d1370 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-&cam_sensor_mclk0_active{
+&cam_sensor_mclk0_active {
 	/* MCLK0 */
 	mux {
 		pins = "gpio13";
@@ -20,7 +20,7 @@
 	config {
 		pins = "gpio13";
 		bias-disable; /* No PULL */
-		drive-strength = <8>; /* 2 MA */
+		drive-strength = <8>; /* 8 MA */
 	};
 };
 
@@ -34,19 +34,75 @@
 	config {
 		pins = "gpio13";
 		bias-pull-down; /* PULL DOWN */
-		drive-strength = <8>; /* 2 MA */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk1_active {
+	/* MCLK1 */
+	mux {
+		pins = "gpio14";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio14";
+		bias-disable; /* No PULL */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk1_suspend {
+	/* MCLK1 */
+	mux {
+		pins = "gpio14";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio14";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk2_active {
+	/* MCLK2 */
+	mux {
+		pins = "gpio15";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio15";
+		bias-disable; /* No PULL */
+		drive-strength = <8>; /* 8 MA */
+	};
+};
+
+&cam_sensor_mclk2_suspend {
+	/* MCLK2 */
+	mux {
+		pins = "gpio15";
+		function = "cam_mclk";
+	};
+
+	config {
+		pins = "gpio15";
+		bias-pull-down; /* PULL DOWN */
+		drive-strength = <8>; /* 8 MA */
 	};
 };
 
 &cam_sensor_rear_active {
 	/* RESET, AVDD LDO */
 	mux {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		bias-disable; /* No PULL */
 		drive-strength = <2>; /* 2 MA */
 	};
@@ -55,43 +111,115 @@
 &cam_sensor_rear_suspend {
 	/* RESET, AVDD LDO */
 	mux {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio8","gpio79";
+		pins = "gpio8", "gpio29";
 		bias-pull-down; /* PULL DOWN */
 		drive-strength = <2>; /* 2 MA */
 		output-low;
 	};
 };
 
-&cam_sensor_front_active{
-	/* RESET  AVDD_LDO*/
+&cam_sensor_front_active {
+	/* RESET  AVDD_LDO */
 	mux {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		bias-disable; /* No PULL */
 		drive-strength = <2>; /* 2 MA */
 	};
 };
 
-&cam_sensor_front_suspend{
+&cam_sensor_front_suspend {
 	/* RESET */
 	mux {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		function = "gpio";
 	};
 
 	config {
-		pins = "gpio26", "gpio8";
+		pins = "gpio26", "gpio12";
 		bias-pull-down; /* PULL DOWN */
 		drive-strength = <2>; /* 2 MA */
 		output-low;
 	};
 };
+
+&cam_sensor_iris_active {
+	/* RESET  AVDD_LDO */
+	mux {
+		pins = "gpio21", "gpio122";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio21", "gpio122";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+&cam_sensor_iris_suspend {
+	/* RESET  AVDD_LDO */
+	mux {
+		pins = "gpio21", "gpio122";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio21", "gpio122";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
+
+&cam_sensor_rear_vana {
+	/* AVDD_LDO */
+	mux {
+		pins = "gpio7";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio7";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+&cam_res_mgr_active {
+	/* AVDD_LDO */
+	mux {
+		pins = "gpio79";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio79";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+	};
+};
+
+&cam_res_mgr_suspend {
+	/* AVDD_LDO */
+	mux {
+		pins = "gpio79";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio79";
+		bias-disable; /* No PULL */
+		drive-strength = <2>; /* 2 MA */
+		output-low;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
index ce62781..1062ca6 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
@@ -14,9 +14,12 @@
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "sda845-svr-pinctrl-overlay.dtsi"
 #include "sdm845-camera-sensor-svr.dtsi"
-#include "smb1355.dtsi"
 #include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
 &vendor {
 	bluetooth: bt_wcn3990 {
 		compatible = "qca,wcn3990";
@@ -249,6 +252,24 @@
 	#cooling-cells = <2>;
 };
 
+&pm8998_l10 {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	qcom,init-voltage = <1800000>;
+};
+
+&pm8998_l15 {
+	regulator-min-microvolt = <1504000>;
+	regulator-max-microvolt = <1504000>;
+	qcom,init-voltage = <1504000>;
+};
+
+&pm8998_l16 {
+	regulator-min-microvolt = <3312000>;
+	regulator-max-microvolt = <3312000>;
+	qcom,init-voltage = <3312000>;
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts
index c55c2a5..5f0db81 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-cdp-overlay.dts
@@ -14,9 +14,6 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm429-cdp.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts
index 7735b35..571f0fc 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-mtp-overlay.dts
@@ -14,9 +14,6 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm429-mtp.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts
index fae68c9..8e12295 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-qrd-overlay.dts
@@ -14,9 +14,6 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm429-qrd.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm429.dtsi b/arch/arm64/boot/dts/qcom/sdm429.dtsi
index 19df054..b52ee2d 100644
--- a/arch/arm64/boot/dts/qcom/sdm429.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429.dtsi
@@ -146,6 +146,16 @@
 };
 
 &soc {
+	devfreq_spdm_cpu {
+		status = "disabled";
+	};
+
+	devfreq_spdm_gov {
+		status = "disabled";
+	};
+};
+
+&soc {
 	/delete-node/ qcom,cpu-clock-8939@b111050;
 	clock_cpu: qcom,cpu-clock-8939@b111050 {
 		compatible = "qcom,cpu-clock-sdm429";
diff --git a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi
index 5e2c740..eae8c56 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-cdp.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
 &cci {
 	actuator0: qcom,actuator@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi
index 5e2c740..eae8c56 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-mtp.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
 &cci {
 	actuator0: qcom,actuator@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi
index c2c9c79..ef0e977 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-camera-sensor-qrd.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
 &cci {
 	actuator0: qcom,actuator@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts
index 5e86672..87239b9 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-cdp-overlay.dts
@@ -14,9 +14,6 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm439-cdp.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
index fd66f4b..cef5534 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
@@ -233,12 +233,25 @@
 
 &dsi_hx8399c_hd_vid {
 	/delete-property/ qcom,mdss-dsi-panel-timings;
-	qcom,mdss-dsi-panel-timings-phy-12nm = [08 06 0a 02 00 04 02 08];
+	qcom,mdss-dsi-panel-timings-phy-12nm = [09 06 0a 02 00 05 02 08];
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
 	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 };
 
 &dsi_nt35695b_truly_fhd_cmd {
@@ -555,3 +568,27 @@
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
 	/delete-node/ qcom,mdss-dsi-display-timings;
 };
+
+&dsi_truly_1080_vid {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_truly_1080_cmd {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts
index 468f514..37741b2 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-external-codec-mtp-overlay.dts
@@ -14,10 +14,6 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
 #include "sdm439-mtp.dtsi"
 #include "sdm439-external-codec.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts
index 8b6c6fb..a7c5f4e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp-overlay.dts
@@ -14,9 +14,6 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/msm-clocks-8953.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm439-mtp.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
index 29e0d72..913d079 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
@@ -208,10 +208,441 @@
 
 &dsi_hx8399c_hd_vid {
 	/delete-property/ qcom,mdss-dsi-panel-timings;
-	qcom,mdss-dsi-panel-timings-phy-12nm = [08 06 0a 02 00 04 02 08];
+	qcom,mdss-dsi-panel-timings-phy-12nm = [09 06 0a 02 00 05 02 08];
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
 	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_nt35695b_truly_fhd_cmd {
+	qcom,mdss-dsi-panel-width = <1080>;
+	qcom,mdss-dsi-panel-height = <1920>;
+	qcom,mdss-dsi-h-front-porch = <120>;
+	qcom,mdss-dsi-h-back-porch = <60>;
+	qcom,mdss-dsi-h-pulse-width = <12>;
+	qcom,mdss-dsi-h-sync-skew = <0>;
+	qcom,mdss-dsi-v-back-porch = <2>;
+	qcom,mdss-dsi-v-front-porch = <12>;
+	qcom,mdss-dsi-v-pulse-width = <2>;
+	qcom,mdss-dsi-h-sync-pulse = <0>;
+	qcom,mdss-dsi-h-left-border = <0>;
+	qcom,mdss-dsi-h-right-border = <0>;
+	qcom,mdss-dsi-v-top-border = <0>;
+	qcom,mdss-dsi-v-bottom-border = <0>;
+	qcom,mdss-dsi-panel-framerate = <60>;
+	qcom,mdss-dsi-on-command =
+		[15 01 00 00 10 00 02 ff 20
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 55
+		15 01 00 00 00 00 02 02 45
+		15 01 00 00 00 00 02 03 55
+		15 01 00 00 00 00 02 05 50
+		15 01 00 00 00 00 02 06 a8
+		15 01 00 00 00 00 02 07 ad
+		15 01 00 00 00 00 02 08 0c
+		15 01 00 00 00 00 02 0b aa
+		15 01 00 00 00 00 02 0c aa
+		15 01 00 00 00 00 02 0e b0
+		15 01 00 00 00 00 02 0f b3
+		15 01 00 00 00 00 02 11 28
+		15 01 00 00 00 00 02 12 10
+		15 01 00 00 00 00 02 13 01
+		15 01 00 00 00 00 02 14 4a
+		15 01 00 00 00 00 02 15 12
+		15 01 00 00 00 00 02 16 12
+		15 01 00 00 00 00 02 30 01
+		15 01 00 00 00 00 02 72 11
+		15 01 00 00 00 00 02 58 82
+		15 01 00 00 00 00 02 59 00
+		15 01 00 00 00 00 02 5a 02
+		15 01 00 00 00 00 02 5b 00
+		15 01 00 00 00 00 02 5c 82
+		15 01 00 00 00 00 02 5d 80
+		15 01 00 00 00 00 02 5e 02
+		15 01 00 00 00 00 02 5f 00
+		15 01 00 00 00 00 02 ff 24
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 0b
+		15 01 00 00 00 00 02 02 0c
+		15 01 00 00 00 00 02 03 89
+		15 01 00 00 00 00 02 04 8a
+		15 01 00 00 00 00 02 05 0f
+		15 01 00 00 00 00 02 06 10
+		15 01 00 00 00 00 02 07 10
+		15 01 00 00 00 00 02 08 1c
+		15 01 00 00 00 00 02 09 00
+		15 01 00 00 00 00 02 0a 00
+		15 01 00 00 00 00 02 0b 00
+		15 01 00 00 00 00 02 0c 00
+		15 01 00 00 00 00 02 0d 13
+		15 01 00 00 00 00 02 0e 15
+		15 01 00 00 00 00 02 0f 17
+		15 01 00 00 00 00 02 10 01
+		15 01 00 00 00 00 02 11 0b
+		15 01 00 00 00 00 02 12 0c
+		15 01 00 00 00 00 02 13 89
+		15 01 00 00 00 00 02 14 8a
+		15 01 00 00 00 00 02 15 0f
+		15 01 00 00 00 00 02 16 10
+		15 01 00 00 00 00 02 17 10
+		15 01 00 00 00 00 02 18 1c
+		15 01 00 00 00 00 02 19 00
+		15 01 00 00 00 00 02 1a 00
+		15 01 00 00 00 00 02 1b 00
+		15 01 00 00 00 00 02 1c 00
+		15 01 00 00 00 00 02 1d 13
+		15 01 00 00 00 00 02 1e 15
+		15 01 00 00 00 00 02 1f 17
+		15 01 00 00 00 00 02 20 00
+		15 01 00 00 00 00 02 21 01
+		15 01 00 00 00 00 02 22 00
+		15 01 00 00 00 00 02 23 40
+		15 01 00 00 00 00 02 24 40
+		15 01 00 00 00 00 02 25 6d
+		15 01 00 00 00 00 02 26 40
+		15 01 00 00 00 00 02 27 40
+		15 01 00 00 00 00 02 29 d8
+		15 01 00 00 00 00 02 2a 2a
+		15 01 00 00 00 00 02 4b 03
+		15 01 00 00 00 00 02 4c 11
+		15 01 00 00 00 00 02 4d 10
+		15 01 00 00 00 00 02 4e 01
+		15 01 00 00 00 00 02 4f 01
+		15 01 00 00 00 00 02 50 10
+		15 01 00 00 00 00 02 51 00
+		15 01 00 00 00 00 02 52 80
+		15 01 00 00 00 00 02 53 00
+		15 01 00 00 00 00 02 54 07
+		15 01 00 00 00 00 02 55 25
+		15 01 00 00 00 00 02 56 00
+		15 01 00 00 00 00 02 58 07
+		15 01 00 00 00 00 02 5b 43
+		15 01 00 00 00 00 02 5c 00
+		15 01 00 00 00 00 02 5f 73
+		15 01 00 00 00 00 02 60 73
+		15 01 00 00 00 00 02 63 22
+		15 01 00 00 00 00 02 64 00
+		15 01 00 00 00 00 02 67 08
+		15 01 00 00 00 00 02 68 04
+		15 01 00 00 00 00 02 7a 80
+		15 01 00 00 00 00 02 7b 91
+		15 01 00 00 00 00 02 7c d8
+		15 01 00 00 00 00 02 7d 60
+		15 01 00 00 00 00 02 93 06
+		15 01 00 00 00 00 02 94 06
+		15 01 00 00 00 00 02 8a 00
+		15 01 00 00 00 00 02 9b 0f
+		15 01 00 00 00 00 02 b3 c0
+		15 01 00 00 00 00 02 b4 00
+		15 01 00 00 00 00 02 b5 00
+		15 01 00 00 00 00 02 b6 21
+		15 01 00 00 00 00 02 b7 22
+		15 01 00 00 00 00 02 b8 07
+		15 01 00 00 00 00 02 b9 07
+		15 01 00 00 00 00 02 ba 22
+		15 01 00 00 00 00 02 bd 20
+		15 01 00 00 00 00 02 be 07
+		15 01 00 00 00 00 02 bf 07
+		15 01 00 00 00 00 02 c1 6d
+		15 01 00 00 00 00 02 c4 24
+		15 01 00 00 00 00 02 e3 00
+		15 01 00 00 00 00 02 ec 00
+		15 01 00 00 00 00 02 ff 10
+		15 01 00 00 00 00 02 bb 10
+		15 01 00 00 00 00 02 35 00
+		05 01 00 00 78 00 02 11 00
+		05 01 00 00 78 00 02 29 00];
+	qcom,mdss-dsi-off-command = [05 01 00 00 14
+		00 02 28 00 05 01 00 00 78 00 02 10 00];
+	qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 03 08 06 0e];
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,ulps-enabled;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	/delete-node/ qcom,mdss-dsi-display-timings;
+};
+
+&dsi_nt35695b_truly_fhd_video {
+	qcom,mdss-dsi-panel-width = <1080>;
+	qcom,mdss-dsi-panel-height = <1920>;
+	qcom,mdss-dsi-h-front-porch = <120>;
+	qcom,mdss-dsi-h-back-porch = <60>;
+	qcom,mdss-dsi-h-pulse-width = <12>;
+	qcom,mdss-dsi-h-sync-skew = <0>;
+	qcom,mdss-dsi-h-sync-pulse = <0>;
+	qcom,mdss-dsi-v-back-porch = <2>;
+	qcom,mdss-dsi-v-front-porch = <12>;
+	qcom,mdss-dsi-v-pulse-width = <2>;
+	qcom,mdss-dsi-h-left-border = <0>;
+	qcom,mdss-dsi-h-right-border = <0>;
+	qcom,mdss-dsi-v-top-border = <0>;
+	qcom,mdss-dsi-v-bottom-border = <0>;
+	qcom,mdss-dsi-panel-framerate = <60>;
+	qcom,mdss-dsi-on-command =
+		[15 01 00 00 10 00 02 ff 20
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 55
+		15 01 00 00 00 00 02 02 45
+		15 01 00 00 00 00 02 03 55
+		15 01 00 00 00 00 02 05 50
+		15 01 00 00 00 00 02 06 a8
+		15 01 00 00 00 00 02 07 ad
+		15 01 00 00 00 00 02 08 0c
+		15 01 00 00 00 00 02 0b aa
+		15 01 00 00 00 00 02 0c aa
+		15 01 00 00 00 00 02 0e b0
+		15 01 00 00 00 00 02 0f b3
+		15 01 00 00 00 00 02 11 28
+		15 01 00 00 00 00 02 12 10
+		15 01 00 00 00 00 02 13 01
+		15 01 00 00 00 00 02 14 4a
+		15 01 00 00 00 00 02 15 12
+		15 01 00 00 00 00 02 16 12
+		15 01 00 00 00 00 02 30 01
+		15 01 00 00 00 00 02 72 11
+		15 01 00 00 00 00 02 58 82
+		15 01 00 00 00 00 02 59 00
+		15 01 00 00 00 00 02 5a 02
+		15 01 00 00 00 00 02 5b 00
+		15 01 00 00 00 00 02 5c 82
+		15 01 00 00 00 00 02 5d 80
+		15 01 00 00 00 00 02 5e 02
+		15 01 00 00 00 00 02 5f 00
+		15 01 00 00 00 00 02 ff 24
+		15 01 00 00 00 00 02 fb 01
+		15 01 00 00 00 00 02 00 01
+		15 01 00 00 00 00 02 01 0b
+		15 01 00 00 00 00 02 02 0c
+		15 01 00 00 00 00 02 03 89
+		15 01 00 00 00 00 02 04 8a
+		15 01 00 00 00 00 02 05 0f
+		15 01 00 00 00 00 02 06 10
+		15 01 00 00 00 00 02 07 10
+		15 01 00 00 00 00 02 08 1c
+		15 01 00 00 00 00 02 09 00
+		15 01 00 00 00 00 02 0a 00
+		15 01 00 00 00 00 02 0b 00
+		15 01 00 00 00 00 02 0c 00
+		15 01 00 00 00 00 02 0d 13
+		15 01 00 00 00 00 02 0e 15
+		15 01 00 00 00 00 02 0f 17
+		15 01 00 00 00 00 02 10 01
+		15 01 00 00 00 00 02 11 0b
+		15 01 00 00 00 00 02 12 0c
+		15 01 00 00 00 00 02 13 89
+		15 01 00 00 00 00 02 14 8a
+		15 01 00 00 00 00 02 15 0f
+		15 01 00 00 00 00 02 16 10
+		15 01 00 00 00 00 02 17 10
+		15 01 00 00 00 00 02 18 1c
+		15 01 00 00 00 00 02 19 00
+		15 01 00 00 00 00 02 1a 00
+		15 01 00 00 00 00 02 1b 00
+		15 01 00 00 00 00 02 1c 00
+		15 01 00 00 00 00 02 1d 13
+		15 01 00 00 00 00 02 1e 15
+		15 01 00 00 00 00 02 1f 17
+		15 01 00 00 00 00 02 20 00
+		15 01 00 00 00 00 02 21 01
+		15 01 00 00 00 00 02 22 00
+		15 01 00 00 00 00 02 23 40
+		15 01 00 00 00 00 02 24 40
+		15 01 00 00 00 00 02 25 6d
+		15 01 00 00 00 00 02 26 40
+		15 01 00 00 00 00 02 27 40
+		15 01 00 00 00 00 02 29 d8
+		15 01 00 00 00 00 02 2a 2a
+		15 01 00 00 00 00 02 4b 03
+		15 01 00 00 00 00 02 4c 11
+		15 01 00 00 00 00 02 4d 10
+		15 01 00 00 00 00 02 4e 01
+		15 01 00 00 00 00 02 4f 01
+		15 01 00 00 00 00 02 50 10
+		15 01 00 00 00 00 02 51 00
+		15 01 00 00 00 00 02 52 80
+		15 01 00 00 00 00 02 53 00
+		15 01 00 00 00 00 02 54 07
+		15 01 00 00 00 00 02 55 25
+		15 01 00 00 00 00 02 56 00
+		15 01 00 00 00 00 02 58 07
+		15 01 00 00 00 00 02 5b 43
+		15 01 00 00 00 00 02 5c 00
+		15 01 00 00 00 00 02 5f 73
+		15 01 00 00 00 00 02 60 73
+		15 01 00 00 00 00 02 63 22
+		15 01 00 00 00 00 02 64 00
+		15 01 00 00 00 00 02 67 08
+		15 01 00 00 00 00 02 68 04
+		15 01 00 00 00 00 02 7a 80
+		15 01 00 00 00 00 02 7b 91
+		15 01 00 00 00 00 02 7c d8
+		15 01 00 00 00 00 02 7d 60
+		15 01 00 00 00 00 02 93 06
+		15 01 00 00 00 00 02 94 06
+		15 01 00 00 00 00 02 8a 00
+		15 01 00 00 00 00 02 9b 0f
+		15 01 00 00 00 00 02 b3 c0
+		15 01 00 00 00 00 02 b4 00
+		15 01 00 00 00 00 02 b5 00
+		15 01 00 00 00 00 02 b6 21
+		15 01 00 00 00 00 02 b7 22
+		15 01 00 00 00 00 02 b8 07
+		15 01 00 00 00 00 02 b9 07
+		15 01 00 00 00 00 02 ba 22
+		15 01 00 00 00 00 02 bd 20
+		15 01 00 00 00 00 02 be 07
+		15 01 00 00 00 00 02 bf 07
+		15 01 00 00 00 00 02 c1 6d
+		15 01 00 00 00 00 02 c4 24
+		15 01 00 00 00 00 02 e3 00
+		15 01 00 00 00 00 02 ec 00
+		15 01 00 00 00 00 02 ff 10
+		15 01 00 00 00 00 02 bb 03
+		05 01 00 00 78 00 02 11 00
+		05 01 00 00 78 00 02 29 00];
+	qcom,mdss-dsi-off-command =
+		[05 01 00 00 14 00 02 28 00
+		05 01 00 00 78 00 02 10 00];
+	qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 03 08 06 0e];
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	/delete-node/ qcom,mdss-dsi-display-timings;
+};
+
+&dsi_truly_1080_vid {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_truly_1080_cmd {
+	/delete-property/ qcom,mdss-dsi-panel-timings;
+	qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 02 08 06 0e];
+	qcom,mdss-dsi-t-clk-post = <0x02>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w based mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio61";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio61";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi b/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
index d34c34a..615489e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
@@ -271,6 +271,21 @@
 			};
 		};
 	};
+
+	pa-therm0 {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8953_adc_tm 0x36>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+	};
 };
 
 &pm8953_vadc {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
index bc2ba9f..9c4120c 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
@@ -43,6 +43,27 @@
 	qcom,battery-data = <&mtp_batterydata>;
 };
 
+&pmi632_vadc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&quiet_therm_default &smb_therm_default>;
+};
+
+&pmi632_gpios {
+	quiet_therm {
+		quiet_therm_default: quiet_therm_default {
+			pins = "gpio3";
+			bias-high-impedance;
+		};
+	};
+
+	smb_therm {
+		smb_therm_default: smb_therm_default {
+			pins = "gpio4";
+			bias-high-impedance;
+		};
+	};
+};
+
 &pm8953_typec {
 	status = "disabled";
 };
@@ -113,75 +134,168 @@
 		thermal-governor = "step_wise";
 
 		trips {
-			batt_trip1: batt-trip1 {
-				temperature = <41000>;
-				hysteresis = <2000>;
-				type = "passive";
-			};
-			cpus_trip: cpus-trip {
+			quiet_modem_439_trip1: quiet-modem-trip0 {
 				temperature = <44000>;
-				hysteresis = <0>;
-				type = "passive";
-			};
-			batt_trip2: batt-trip2 {
-				temperature = <45000>;
 				hysteresis = <4000>;
 				type = "passive";
 			};
-			gpu_trip: gpu-trip {
+			quiet_modem_439_trip2: quiet-modem-trip1 {
+				temperature = <46000>;
+				hysteresis = <4000>;
+				type = "passive";
+			};
+			quiet_cpus_439_trip: quiet-cpus-trip {
+				temperature = <48000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			quiet_modem_439_trip3: quiet-modem-trip2 {
+				temperature = <48000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			quiet_batt_439_trip1: quiet-batt-trip1 {
+				temperature = <48000>;
+				hysteresis = <4000>;
+				type = "passive";
+			};
+			quiet_gpu_439_trip: quiet-gpu-trip {
 				temperature = <50000>;
 				hysteresis = <0>;
 				type = "passive";
 			};
+			quiet_batt_439_trip2: quiet-batt-trip2 {
+				temperature = <50000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			quiet_batt_439_trip3: quiet-batt-trip3 {
+				temperature = <52000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+
+			quiet_batt_439_trip4: quiet-batt-trip4 {
+				temperature = <54000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			quiet_modem_439_trip4: quiet-modem-trip3 {
+				temperature = <55000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+			quiet_batt_439_trip5: quiet-batt-trip5 {
+				temperature = <56000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
 		};
 
 		cooling-maps {
 			skin_cpu0 {
-				trip = <&cpus_trip>;
-				/* throttle from fmax to 1094400KHz */
-				cooling-device = <&CPU0 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				/* throttle from fmax to 1497600KHz */
+				cooling-device = <&CPU0 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu1 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU1 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU1 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu2 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU2 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU2 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu3 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU3 THERMAL_NO_LIMIT 5>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU3 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu4 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU4 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				/* throttle from fmax to 1171200KHz */
+				cooling-device = <&CPU4 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu5 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU5 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU5 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu6 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU6 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU6 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_cpu7 {
-				trip = <&cpus_trip>;
-				cooling-device = <&CPU7 THERMAL_NO_LIMIT 3>;
+				trip = <&quiet_cpus_439_trip>;
+				cooling-device = <&CPU7 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-3)>;
 			};
 			skin_gpu {
-				trip = <&gpu_trip>;
-				/* throttle from fmax to 375000000Hz */
-				cooling-device = <&msm_gpu THERMAL_NO_LIMIT 2>;
+				trip = <&quiet_gpu_439_trip>;
+				/* throttle from fmax to 510000000Hz */
+				cooling-device = <&msm_gpu THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-2)>;
+			};
+			modem_proc_lvl1 {
+				trip = <&quiet_modem_439_trip1>;
+				cooling-device = <&modem_proc 1 1>;
+			};
+			modem_proc_lvl2 {
+				trip = <&quiet_modem_439_trip4>;
+				cooling-device = <&modem_proc 3 3>;
+			};
+			modem_lvl1 {
+				trip = <&quiet_modem_439_trip2>;
+				cooling-device = <&modem_pa 1 1>;
+			};
+			modem_lvl2 {
+				trip = <&quiet_modem_439_trip3>;
+				cooling-device = <&modem_pa 2 2>;
+			};
+			modem_lvl3 {
+				trip = <&quiet_modem_439_trip4>;
+				cooling-device = <&modem_pa 3 3>;
 			};
 			battery_lvl1 {
-				trip = <&batt_trip1>;
-				cooling-device = <&pmi632_charger 4 4>;
+				trip = <&quiet_batt_439_trip1>;
+				cooling-device = <&pmi632_charger 1 1>;
 			};
 			battery_lvl2 {
-				trip = <&batt_trip2>;
+				trip = <&quiet_batt_439_trip2>;
+				cooling-device = <&pmi632_charger 2 2>;
+			};
+			battery_lvl3 {
+				trip = <&quiet_batt_439_trip3>;
+				cooling-device = <&pmi632_charger 3 3>;
+			};
+			battery_lvl4 {
+				trip = <&quiet_batt_439_trip4>;
+				cooling-device = <&pmi632_charger 4 4>;
+			};
+			battery_lvl5 {
+				trip = <&quiet_batt_439_trip5>;
 				cooling-device = <&pmi632_charger 5 5>;
 			};
 		};
 	};
+
+	quiet-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmi632_adc_tm 0x53>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts
index ed6b2ad..46a7856 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd-overlay.dts
@@ -14,9 +14,6 @@
 /dts-v1/;
 /plugin/;
 
- #include <dt-bindings/gpio/gpio.h>
- #include <dt-bindings/clock/msm-clocks-8953.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "sdm439-qrd.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dts b/arch/arm64/boot/dts/qcom/sdm439-qrd.dts
index b8a9f2b..4a2fbff 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dts
@@ -22,10 +22,3 @@
 	qcom,board-id = <0xb 2>;
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
-
-&pmi632_vadc {
-	chan@4a {
-		qcom,scale-function = <22>;
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
index 1979f4e..a3c5b1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
@@ -115,7 +115,7 @@
 	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
 	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 
-	cd-gpios = <&tlmm 67 0x1>;
+	cd-gpios = <&tlmm 67 0x0>;
 
 	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
 								200000000>;
@@ -141,6 +141,24 @@
 			gpio-key,wakeup;
 		};
 	};
+
+	fpc1020 {
+		compatible = "fpc,fpc1020";
+		interrupt-parent = <&tlmm>;
+		interrupts = <48 0>;
+		fpc,gpio_rst = <&tlmm 124 0x0>;
+		fpc,gpio_irq = <&tlmm 48 0>;
+		vcc_spi-supply = <&pm8953_l5>;
+		vdd_io-supply  = <&pm8953_l5>;
+		vdd_ana-supply = <&pm8953_l5>;
+		fpc,enable-on-boot;
+		pinctrl-names = "fpc1020_reset_reset",
+				"fpc1020_reset_active",
+				"fpc1020_irq_active";
+		pinctrl-0 = <&fpc_reset_low>;
+		pinctrl-1 = <&fpc_reset_high>;
+		pinctrl-2 = <&fpc_int_low>;
+	};
 };
 
 &tlmm {
@@ -305,10 +323,108 @@
 
 &dsi_hx8399c_hd_vid {
 	/delete-property/ qcom,mdss-dsi-panel-timings;
-	qcom,mdss-dsi-panel-timings-phy-12nm = [08 06 0a 02 00 04 02 08];
+	qcom,mdss-dsi-panel-timings-phy-12nm = [09 06 0a 02 00 05 02 08];
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
 	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
+};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_vadc {
+	chan@4a {
+		qcom,scale-function = <22>;
+	};
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w based mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio61";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio61";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <61 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-rcm-overlay.dts b/arch/arm64/boot/dts/qcom/sdm439-rcm-overlay.dts
new file mode 100644
index 0000000..be0de06
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-rcm-overlay.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm439-rcm.dtsi"
+
+/ {
+	model = "RCM";
+	qcom,board-id = <21 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-rcm.dts b/arch/arm64/boot/dts/qcom/sdm439-rcm.dts
new file mode 100644
index 0000000..71d02a0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-rcm.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm439.dtsi"
+#include "sdm439-rcm.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM439 RCM";
+	compatible = "qcom,sdm439-cdp", "qcom,sdm439", "qcom,cdp";
+	qcom,board-id = <21 1>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-rcm.dtsi b/arch/arm64/boot/dts/qcom/sdm439-rcm.dtsi
new file mode 100644
index 0000000..4ba4c00
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm439-rcm.dtsi
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm439-cdp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
index e2f2dea..414e8fe 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
@@ -67,7 +67,7 @@
 		pm8953_cx_cdev: regulator-cx-cdev {
 			compatible = "qcom,regulator-cooling-device";
 			regulator-cdev-supply = <&pm8953_s2_floor_level>;
-			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM_PLUS
 					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
 			#cooling-cells = <2>;
 		};
@@ -378,12 +378,12 @@
 			reg = <0x2000 0x100>;
 			regulator-name = "pm8953_s5";
 			regulator-min-microvolt = <490000>;
-			regulator-max-microvolt = <910000>;
+			regulator-max-microvolt = <960000>;
 
 			pm8953_s5_limit: avs-limit-regulator {
 				regulator-name = "pm8953_s5_avs_limit";
 				regulator-min-microvolt = <490000>;
-				regulator-max-microvolt = <910000>;
+				regulator-max-microvolt = <960000>;
 			};
 		};
 	};
@@ -421,7 +421,7 @@
 		regulator-max-microvolt = <5>;
 
 		qcom,cpr-fuse-corners = <3>;
-		qcom,cpr-voltage-ceiling = <760000 795000 910000>;
+		qcom,cpr-voltage-ceiling = <810000 845000 960000>;
 		qcom,cpr-voltage-floor =   <700000 700000 790000>;
 		vdd-apc-supply = <&pm8953_s5>;
 		mem-acc-supply = <&apc_mem_acc_vreg>;
@@ -472,14 +472,21 @@
 		qcom,cpr-fuse-version-map =
 			/* <Speed-bin pvs-version cpr-rev ... ... ...> */
 			<(-1)    (-1)   ( 0)   (-1)    (-1)    (-1)>,
+			<(-1)    (-1)   ( 1)   (-1)    (-1)    (-1)>,
 			<(-1)    (-1)   (-1)   (-1)    (-1)    (-1)>;
 
 		qcom,cpr-quotient-adjustment =
-			<66    77      66>, /* SVSP_30mV, NOM_35mV, TUR_30mV */
-			<0      0       0>;
+			<66      77      66>, /* SVSP/NOM/TUR:30/35/30 mV */
+			<(-74) (-57)  (-30)>, /* SVSP/NOM/TUR:-34/-26/-14 mV */
+			<0        0       0>;
+
+		qcom,cpr-floor-to-ceiling-max-range =
+			<50000 50000 50000 65000 65000>,
+			<50000 50000 50000 65000 65000>,
+			<50000 50000 50000 65000 65000>;
 
 		qcom,cpr-voltage-ceiling-override =
-			<(-1) (-1) 795000 795000 835000 910000 910000>;
+			<(-1) (-1) 810000 845000 885000 960000 960000>;
 
 		qcom,cpr-enable;
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index 0e4f666..be05b6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -50,6 +50,7 @@
 		qcom,governor-per-policy;
 
 		qcom,cpufreq-table-0 =
+			 <  960000 >,
 			 < 1305600 >,
 			 < 1497600 >,
 			 < 1708800 >,
@@ -279,6 +280,7 @@
 
 	qcom,speed0-bin-v0-c1 =
 		<          0 0>,
+		<  960000000 1>,
 		< 1305600000 1>,
 		< 1497600000 2>,
 		< 1708800000 3>,
@@ -299,6 +301,7 @@
 
 	qcom,speed1-bin-v0-c1 =
 		<          0 0>,
+		<  960000000 1>,
 		< 1305600000 1>,
 		< 1497600000 2>,
 		< 1708800000 3>,
@@ -325,6 +328,16 @@
 	vdd_hf_pll-supply = <&pm8953_l7_ao>;
 };
 
+&soc {
+	devfreq_spdm_cpu {
+		status = "disabled";
+	};
+
+	devfreq_spdm_gov {
+		status = "disabled";
+	};
+};
+
 &clock_gcc_mdss {
 	compatible = "qcom,gcc-mdss-sdm439";
 	clocks = <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_src>,
@@ -341,10 +354,10 @@
 	reg = <0x001a94400 0x400>,
 		<0x0184d074 0x8>;
 	reg-names = "pll_base", "gdsc_base";
-	/delete-property/ qcom,dsi-pll-ssc-en;
-	/delete-property/ qcom,dsi-pll-ssc-mode;
-	/delete-property/ qcom,ssc-frequency-hz;
-	/delete-property/ qcom,ssc-ppm;
+	qcom,dsi-pll-ssc-en;
+	qcom,dsi-pll-ssc-mode = "down-spread";
+	qcom,ssc-frequency-hz = <31500>;
+	qcom,ssc-ppm = <5000>;
 };
 
 &mdss_dsi1_pll {
@@ -352,10 +365,10 @@
 	reg = <0x001a96400 0x400>,
 		<0x0184d074 0x8>;
 	reg-names = "pll_base", "gdsc_base";
-	/delete-property/ qcom,dsi-pll-ssc-en;
-	/delete-property/ qcom,dsi-pll-ssc-mode;
-	/delete-property/ qcom,ssc-frequency-hz;
-	/delete-property/ qcom,ssc-ppm;
+	qcom,dsi-pll-ssc-en;
+	qcom,dsi-pll-ssc-mode = "down-spread";
+	qcom,ssc-frequency-hz = <31500>;
+	qcom,ssc-ppm = <5000>;
 };
 
 &mdss_dsi {
@@ -622,3 +635,17 @@
 		};
 	};
 };
+
+&mdss_mdp {
+	qcom,vbif-settings = <0xd0 0x20>;
+};
+
+&thermal_zones {
+	hexa-cpu-max-step {
+		trips {
+			cpu-trip {
+				temperature = <95000>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
index b9aadc1..b73b49a 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
index 64d9e64..5db626b 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
@@ -46,3 +46,82 @@
 	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
 };
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w based mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio59";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio59";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
index e09d637..6e39327 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
@@ -372,3 +372,24 @@
 		};
 	};
 };
+
+&tlmm {
+	pmx_mdss {
+		mdss_dsi_active: mdss_dsi_active {
+			mux {
+				pins = "gpio61";
+			};
+			config {
+				pins = "gpio61";
+			};
+		};
+		mdss_dsi_suspend: mdss_dsi_suspend {
+			mux {
+				pins = "gpio61";
+			};
+			config {
+				pins = "gpio61";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
index 386bd71..40fdb91 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
@@ -177,3 +177,81 @@
 	};
 };
 
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+	smb_en {
+		smb_en_default: smb_en_default {
+			pins = "gpio2";
+			function = "func1";
+			output-enable;
+		};
+	};
+
+	pmi632_sense {
+		/* GPIO 7 and 8 are external-sense pins for PMI632 */
+		pmi632_sense_default: pmi632_sense_default {
+			pins = "gpio7", "gpio8";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+
+	pmi632_ctm {
+		/* Disable GPIO1 for h/w based mitigation */
+		pmi632_ctm_default: pmi632_ctm_default {
+			pins = "gpio1";
+			bias-high-impedance;	/* disable the GPIO */
+			bias-disable;		/* no-pull */
+		};
+	};
+};
+
+&tlmm {
+	smb_int_default: smb_int_default {
+		mux {
+			pins = "gpio59";
+			function = "gpio";
+		};
+		config {
+			pins = "gpio59";
+			drive-strength = <2>;
+			bias-pull-up;
+			input-enable;
+		};
+	};
+};
+
+&smb1355_0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
+
+&smb1355_1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&smb_int_default
+		&smb_en_default &pmi632_sense_default &pmi632_ctm_default>;
+	interrupt-parent = <&tlmm>;
+	interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		status = "ok";
+		/delete-property/ io-channels;
+		/delete-property/ io-channels-names;
+		qcom,parallel-mode = <1>;
+		qcom,disable-ctm;
+		qcom,hw-die-temp-mitigation;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
index 2669d1f..9d6543f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
@@ -24,33 +24,3 @@
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
index 17ae9d1..60b149d 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
@@ -24,34 +24,3 @@
 	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
index 3662cf3..1dd1163 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sdm632.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM632 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
index 4d68901..e0e6b4b 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
@@ -25,34 +25,3 @@
 	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
index 6ca2940..413e85f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
@@ -25,34 +25,3 @@
 	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
 };
 
-
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
index d2a9cf1..aea6bff 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
@@ -14,8 +14,8 @@
 /dts-v1/;
 
 #include "sdm632.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
 #include "sdm632-pm8004.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
index fe7ab38..68f0ea0 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
@@ -24,32 +24,3 @@
 	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
 };
 
-&soc {
-	gpio_keys {
-		/delete-node/home;
-	};
-};
-
-&tlmm {
-	tlmm_gpio_key {
-		gpio_key_active: gpio_key_active {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-
-		gpio_key_suspend: gpio_key_suspend {
-			mux {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-
-			config {
-				pins = "gpio85", "gpio86", "gpio87";
-			};
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
index 14ba3b4..aa20680 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
@@ -13,3 +13,34 @@
 
 #include "sdm450-pmi632-cdp-s2.dtsi"
 
+&soc {
+	gpio_keys {
+		home {
+			status = "disabled";
+		};
+	};
+};
+
+&tlmm {
+	tlmm_gpio_key {
+		gpio_key_active {
+			mux {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+
+			config {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+		};
+
+		gpio_key_suspend {
+			mux {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+
+			config {
+				pins = "gpio85", "gpio86", "gpio87";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632.dtsi b/arch/arm64/boot/dts/qcom/sdm632.dtsi
index 67efe0f..ee2476e 100644
--- a/arch/arm64/boot/dts/qcom/sdm632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632.dtsi
@@ -38,6 +38,16 @@
 	compatible = "qcom,cc-debug-sdm632";
 };
 
+&soc {
+	devfreq_spdm_cpu {
+		status = "disabled";
+	};
+
+	devfreq_spdm_gov {
+		status = "disabled";
+	};
+};
+
 &clock_gcc_gfx {
 	compatible = "qcom,gcc-gfx-sdm632";
 	qcom,gfxfreq-corner =
@@ -1005,3 +1015,97 @@
 };
 
 #include "sdm632-coresight.dtsi"
+
+/* GPU Overrides */
+&msm_gpu {
+
+	qcom,ca-target-pwrlevel = <4>;
+	qcom,initial-pwrlevel = <5>;
+	/delete-node/ qcom,gpu-pwrlevels;
+
+	/* Power levels */
+	qcom,gpu-pwrlevels {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevels";
+		/* TURBO LD0 */
+		qcom,gpu-pwrlevel@0 {
+			reg = <0>;
+			qcom,gpu-freq = <725000000>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <10>;
+			qcom,bus-max = <10>;
+		};
+
+		/* TURBO */
+		qcom,gpu-pwrlevel@1 {
+			reg = <1>;
+			qcom,gpu-freq = <650000000>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <10>;
+			qcom,bus-max = <10>;
+		};
+
+		/* NOM+ */
+		qcom,gpu-pwrlevel@2 {
+			reg = <2>;
+			qcom,gpu-freq = <560000000>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <8>;
+			qcom,bus-max = <10>;
+		};
+
+		/* NOM */
+		qcom,gpu-pwrlevel@3 {
+			reg = <3>;
+			qcom,gpu-freq = <510000000>;
+			qcom,bus-freq = <9>;
+			qcom,bus-min = <6>;
+			qcom,bus-max = <10>;
+		};
+
+		/* SVS+ */
+		qcom,gpu-pwrlevel@4 {
+			reg = <4>;
+			qcom,gpu-freq = <400000000>;
+			qcom,bus-freq = <7>;
+			qcom,bus-min = <5>;
+			qcom,bus-max = <8>;
+		};
+
+		/* SVS */
+		qcom,gpu-pwrlevel@5 {
+			reg = <5>;
+			qcom,gpu-freq = <320000000>;
+			qcom,bus-freq = <4>;
+			qcom,bus-min = <2>;
+			qcom,bus-max = <6>;
+		};
+
+		/* Low SVS */
+		qcom,gpu-pwrlevel@6 {
+			reg = <6>;
+			qcom,gpu-freq = <216000000>;
+			qcom,bus-freq = <3>;
+			qcom,bus-min = <2>;
+			qcom,bus-max = <4>;
+		};
+
+		qcom,gpu-pwrlevel@7 {
+			reg = <7>;
+			qcom,gpu-freq = <133300000>;
+			qcom,bus-freq = <3>;
+			qcom,bus-min = <1>;
+			qcom,bus-max = <4>;
+		};
+		/* XO */
+		qcom,gpu-pwrlevel@8 {
+			reg = <8>;
+			qcom,gpu-freq = <19200000>;
+			qcom,bus-freq = <0>;
+			qcom,bus-min = <0>;
+			qcom,bus-max = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
index c8f7ac0..78047bd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
 &soc {
 	led_flash_rear: qcom,camera-flash@0 {
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 9402294..348ba6f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -411,6 +411,7 @@
 		qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
 		nvmem-cells = <&minor_rev>;
 		nvmem-cell-names = "minor_rev";
+		camnoc-axi-min-ib-bw = <3000000000>;
 		regulator-names = "camss-vdd";
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index da4d27d..95fb25a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -38,6 +38,8 @@
 	qcom,vddp-ref-clk-supply = <&pm660_l1>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	force-ufshc-probe;
+
 	status = "ok";
 };
 
@@ -296,6 +298,17 @@
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 };
 
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 7764837..7162257 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -14,7 +14,10 @@
 #include "sdm670-pmic-overlay.dtsi"
 #include "sdm670-sde-display.dtsi"
 #include "sdm670-camera-sensor-mtp.dtsi"
+
+&qupv3_se10_i2c {
 #include "smb1355.dtsi"
+};
 
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
@@ -355,6 +358,17 @@
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 };
 
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+};
+
 &dsi_dual_nt35597_truly_video_display {
 	qcom,dsi-display-active;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index 27be1fd..f63d442 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 &pm660_0{
 	pm660_charger: qcom,qpnp-smb2 {
 		compatible = "qcom,qpnp-smb2";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
index 36d485e..67b5ebe 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,7 @@
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi
new file mode 100644
index 0000000..2c1cde6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
index 73d1909..d5edb36 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
@@ -13,13 +13,7 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD SKU2";
@@ -30,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
index 680bc17..9f871c5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
 /dts-v1/;
 
 #include "sdm670.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD SKU2";
@@ -24,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi
new file mode 100644
index 0000000..cdb652e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi
@@ -0,0 +1,32 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
index c22afa4..318939f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
 /dts-v1/;
 
 #include "sdm670.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 43f1465..5ff2c32 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -10,13 +10,16 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/gpio/gpio.h>
 #include "sdm670-camera-sensor-qrd.dtsi"
 #include "sdm670-pmic-overlay.dtsi"
-#include "sdm670-audio-overlay.dtsi"
-#include "smb1355.dtsi"
 #include "sdm670-sde-display.dtsi"
 
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
 &qupv3_se9_2uart {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 48deca6..395e88c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -468,7 +468,7 @@
 
 	dsi_hx8399_truly_cmd_display: qcom,dsi-display@16 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_hx8399_truly_cmd_display";
+		label = "dsi_hx8399_truly_fhd_video_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0>;
@@ -858,6 +858,7 @@
 	qcom,mdss-dsi-panel-status-value = <0x9c>;
 	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
 	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,mdss-dsi-panel-cmds-only-by-right;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
diff --git a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
index a0b4a22..f5e9489 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
@@ -129,15 +129,19 @@
 
 		/* Clocks */
 		clock-names = "core_clk", "iface_clk", "bus_clk",
-			"core0_clk", "core0_bus_clk";
+			"core0_clk", "core0_bus_clk", "core1_clk",
+			"core1_bus_clk";
 		clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
 			<&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
 			<&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
 			<&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
-			<&clock_videocc VIDEO_CC_VCODEC0_AXI_CLK>;
+			<&clock_videocc VIDEO_CC_VCODEC0_AXI_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_AXI_CLK>;
 		qcom,proxy-clock-names = "core_clk", "iface_clk",
-			"bus_clk", "core0_clk", "core0_bus_clk";
-		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0>;
+			"bus_clk", "core0_clk", "core0_bus_clk",
+			"core1_clk", "core1_bus_clk";
+		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x4 0x4>;
 		qcom,allowed-clock-rates = <100000000 200000000 330000000
 			364700000>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 4b39207..c962b42 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -730,6 +730,7 @@
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
 		interrupt-parent = <&intc>;
+		ignored-save-restore-irqs = <38>;
 	};
 
 	pdc: interrupt-controller@b220000{
@@ -1866,6 +1867,7 @@
 		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <1>;
+		spm-level = <5>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
 
 		clock-names =
@@ -1879,7 +1881,7 @@
 			"rx_lane0_sync_clk";
 		clocks =
 			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc UFS_PHY_AXI_UFS_VOTE_CLK>,
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
 			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
@@ -2216,6 +2218,7 @@
 		status = "ok";
 		memory-region = <&pil_modem_mem>;
 		qcom,mem-protect-id = <0xF>;
+		qcom,complete-ramdump;
 
 		/* GPIO inputs from mss */
 		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
@@ -2382,7 +2385,7 @@
 		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
 			<&clock_gcc GCC_SDCC1_APPS_CLK>,
 			<&clock_gcc GCC_SDCC1_ICE_CORE_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>;
+			<&clock_gcc UFS_PHY_AXI_EMMC_VOTE_CLK>;
 		clock-names = "iface_clk", "core_clk", "ice_core_clk",
 			"bus_aggr_clk";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
index 803616d..08c3433 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
@@ -13,13 +13,7 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
index ab3ce4d..91891ba 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
@@ -13,13 +13,7 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
@@ -30,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
index 76b2862..f674893 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
@@ -14,7 +14,7 @@
 /dts-v1/;
 
 #include "sdm710.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
@@ -24,22 +24,3 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
-
-&dsi_dual_nt36850_truly_cmd_display {
-	/delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
-	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,panel-mode-gpio = <&tlmm 76 0>;
-	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
-	qcom,platform-reset-gpio = <&tlmm 75 0>;
-	qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
-	qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
index e3cb7cc..4eb414f 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
@@ -15,6 +15,7 @@
 
 #include "sdm710.dtsi"
 #include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
index 0c37bf1..bf09b67 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
@@ -129,6 +129,7 @@
 		vdd-supply = <&pm8998_l1>;
 		vdda18-supply = <&pm8998_l12>;
 		vdda33-supply = <&pm8998_l24>;
+		qcom,override-bias-ctrl2;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-reg-offset =
 			<0x240 /* QUSB2PHY_PORT_TUNE1 */
@@ -417,6 +418,7 @@
 		vdd-supply = <&pm8998_l1>;
 		vdda18-supply = <&pm8998_l12>;
 		vdda33-supply = <&pm8998_l24>;
+		qcom,override-bias-ctrl2;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-reg-offset =
 			<0x240 /* QUSB2PHY_PORT_TUNE1 */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
index d387f93..aa068e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
@@ -220,7 +220,7 @@
 				 &cam_sensor_rear2_suspend>;
 		gpios = <&tlmm 15 0>,
 			<&tlmm 9 0>,
-			<&tlmm 8 0>;
+			<&tlmm 7 0>;
 		gpio-reset = <1>;
 		gpio-vana = <2>;
 		gpio-req-tbl-num = <0 1 2>;
@@ -261,7 +261,7 @@
 				 &cam_sensor_front_suspend>;
 		gpios = <&tlmm 14 0>,
 			<&tlmm 28 0>,
-			<&tlmm 8 0>;
+			<&tlmm 7 0>;
 		gpio-reset = <1>;
 		gpio-vana = <2>;
 		gpio-req-tbl-num = <0 1 2>;
@@ -441,37 +441,39 @@
 		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
-		led-flash-src = <&led_flash_iris>;
-		cam_vio-supply = <&pm8998_lvs1>;
-		cam_vana-supply = <&pmi8998_bob>;
-		cam_vdig-supply = <&camera_ldo>;
+		cam_vio-supply = <&pm8998_l9>;
+		cam_vana-supply = <&pm8998_l16>;
+		cam_vdig-supply = <&pm8998_l10>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 3312000 1050000 0>;
-		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-min-voltage = <1800000 3312000 1800000 0>;
+		rgltr-max-voltage = <1800000 3312000 1800000 0>;
 		rgltr-load-current = <0 80000 105000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk3_active
+		pinctrl-0 = <&cam_sensor_mclk2_active
 				 &cam_sensor_iris_active>;
-		pinctrl-1 = <&cam_sensor_mclk3_suspend
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
 				 &cam_sensor_iris_suspend>;
-		gpios = <&tlmm 16 0>,
-			<&tlmm 9 0>,
-			<&tlmm 8 0>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 21 0>,
+			<&tlmm 122 0>,
+			<&tlmm 59 0>;
 		gpio-reset = <1>;
 		gpio-vana = <2>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-vdig = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
 					"CAM_RESET3",
-					"CAM_VANA1";
+					"CAM_VANA3",
+					"CAM_VDIG3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index fd6a0c7..2e2de74 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -381,6 +381,7 @@
 		interrupt-names = "cpas_camnoc";
 		interrupts = <0 459 0>;
 		qcom,cpas-hw-ver = <0x170100>; /* Titan v170 v1.0.0 */
+		camnoc-axi-min-ib-bw = <3000000000>;
 		regulator-names = "camss-vdd";
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 349c4c0..812a313 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -14,7 +14,10 @@
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "sdm845-camera-sensor-mtp.dtsi"
+
+&qupv3_se10_i2c {
 #include "smb1355.dtsi"
+};
 
 &vendor {
 	bluetooth: bt_wcn3990 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 6034b6d..f5a979c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -13,9 +13,12 @@
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "sdm845-camera-sensor-qrd.dtsi"
-#include "smb1355.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
 &vendor {
 	bluetooth: bt_wcn3990 {
 		compatible = "qca,wcn3990";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index a5c6ab5..b2b0000 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -18,7 +18,10 @@
 
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
+
+&qupv3_se10_i2c {
 #include "smb1355.dtsi"
+};
 
 &vendor {
 	bluetooth: bt_wcn3990 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index bfcebf6..6132722 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -258,33 +258,13 @@
 
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
-			qcom,msm-bus,name = "mdss_sde_mnoc";
+			qcom,msm-bus,name = "mdss_sde";
 			qcom,msm-bus,num-cases = <3>;
 			qcom,msm-bus,num-paths = <2>;
 			qcom,msm-bus,vectors-KBps =
-			    <22 773 0 0>, <23 773 0 0>,
-			    <22 773 0 6400000>, <23 773 0 6400000>,
-			    <22 773 0 6400000>, <23 773 0 6400000>;
-		};
-
-		qcom,sde-llcc-bus {
-			qcom,msm-bus,name = "mdss_sde_llcc";
-			qcom,msm-bus,num-cases = <3>;
-			qcom,msm-bus,num-paths = <1>;
-			qcom,msm-bus,vectors-KBps =
-			    <132 770 0 0>,
-			    <132 770 0 6400000>,
-			    <132 770 0 6400000>;
-		};
-
-		qcom,sde-ebi-bus {
-			qcom,msm-bus,name = "mdss_sde_ebi";
-			qcom,msm-bus,num-cases = <3>;
-			qcom,msm-bus,num-paths = <1>;
-			qcom,msm-bus,vectors-KBps =
-			    <129 512 0 0>,
-			    <129 512 0 6400000>,
-			    <129 512 0 6400000>;
+				<22 512 0 0>, <23 512 0 0>,
+				<22 512 0 6400000>, <23 512 0 6400000>,
+				<22 512 0 6400000>, <23 512 0 6400000>;
 		};
 
 		qcom,sde-reg-bus {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index 85419c8..97cb981 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -302,6 +302,7 @@
 		interrupt-names = "cpas_camnoc";
 		interrupts = <0 459 0>;
 		qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
+		camnoc-axi-min-ib-bw = <3000000000>;
 		regulator-names = "camss-vdd";
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index ba76273..229d06b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -51,7 +51,7 @@
 			compatible = "qcom,memshare-peripheral";
 			qcom,peripheral-size = <0x500000>;
 			qcom,client-id = <1>;
-			qcom,allocate-boot-time;
+			qcom,allocate-on-request;
 			label = "modem";
 		};
 	};
@@ -470,6 +470,7 @@
 			2784000 35000
 			2803200 40000
 			2841600 50000
+			2956800 60000
 		>;
 		idle-cost-data = <
 			100 80 60 40
@@ -537,6 +538,7 @@
 			2784000 165
 			2803200 170
 			2841600 180
+			2956800 190
 		>;
 		idle-cost-data = <
 			4 3 2 1
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index e9a913f..26b0385 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -776,6 +776,7 @@
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
 		interrupt-parent = <&intc>;
+		ignored-save-restore-irqs = <38>;
 	};
 
 	pdc: interrupt-controller@b220000{
@@ -2974,7 +2975,6 @@
 				 <DCC_READ 0x011360b0 1 0>,
 				 <DCC_READ 0x0113e030 2 0>,
 				 <DCC_READ 0x01141000 1 0>,
-				 <DCC_READ 0x01142028 1 0>,
 				 <DCC_READ 0x01148058 4 0>,
 				 <DCC_READ 0x01160410 3 0>,
 				 <DCC_READ 0x011604a0 1 0>,
@@ -2985,7 +2985,6 @@
 				 <DCC_READ 0x011b6044 4 0>,
 				 <DCC_READ 0x011be030 2 0>,
 				 <DCC_READ 0x011c1000 1 0>,
-				 <DCC_READ 0x011c2028 1 0>,
 				 <DCC_READ 0x011c8058 4 0>,
 				 <DCC_READ 0x011e0410 3 0>,
 				 <DCC_READ 0x011e04a0 1 0>,
@@ -2997,7 +2996,6 @@
 				 <DCC_READ 0x012360B0 1 0>,
 				 <DCC_READ 0x0123E030 2 0>,
 				 <DCC_READ 0x01241000 1 0>,
-				 <DCC_READ 0x01242028 1 0>,
 				 <DCC_READ 0x01248058 4 0>,
 				 <DCC_READ 0x01260410 3 0>,
 				 <DCC_READ 0x012604a0 1 0>,
@@ -3010,7 +3008,6 @@
 				 <DCC_READ 0x012b60b0 1 0>,
 				 <DCC_READ 0x012be030 2 0>,
 				 <DCC_READ 0x012c1000 1 0>,
-				 <DCC_READ 0x012c2028 1 0>,
 				 <DCC_READ 0x012c8058 4 0>,
 				 <DCC_READ 0x012e0410 3 0>,
 				 <DCC_READ 0x012e04a0 1 0>,
@@ -3019,7 +3016,6 @@
 				 <DCC_READ 0x012e6418 1 0>,
 				 <DCC_READ 0x01380900 8 0>,
 				 <DCC_READ 0x01380d00 5 0>,
-				 <DCC_READ 0x01350110 4 0>,
 				 <DCC_READ 0x01430280 1 0>,
 				 <DCC_READ 0x01430288 1 0>,
 				 <DCC_READ 0x0143028c 7 0>,
@@ -3028,7 +3024,6 @@
 				 <DCC_READ 0x011360b0 1 0>,
 				 <DCC_READ 0x0113e030 2 0>,
 				 <DCC_READ 0x01141000 1 0>,
-				 <DCC_READ 0x01142028 1 0>,
 				 <DCC_READ 0x01148058 4 0>,
 				 <DCC_READ 0x01160410 3 0>,
 				 <DCC_READ 0x011604a0 1 0>,
@@ -3039,7 +3034,6 @@
 				 <DCC_READ 0x011b6044 4 0>,
 				 <DCC_READ 0x011be030 2 0>,
 				 <DCC_READ 0x011c1000 1 0>,
-				 <DCC_READ 0x011c2028 1 0>,
 				 <DCC_READ 0x011c8058 4 0>,
 				 <DCC_READ 0x011e0410 3 0>,
 				 <DCC_READ 0x011e04a0 1 0>,
@@ -3051,7 +3045,6 @@
 				 <DCC_READ 0x012360b0 1 0>,
 				 <DCC_READ 0x0123e030 2 0>,
 				 <DCC_READ 0x01241000 1 0>,
-				 <DCC_READ 0x01242028 1 0>,
 				 <DCC_READ 0x01248058 4 0>,
 				 <DCC_READ 0x01260410 3 0>,
 				 <DCC_READ 0x012604a0 1 0>,
@@ -3064,7 +3057,6 @@
 				 <DCC_READ 0x012b60b0 1 0>,
 				 <DCC_READ 0x012be030 2 0>,
 				 <DCC_READ 0x012C1000 1 0>,
-				 <DCC_READ 0x012C2028 1 0>,
 				 <DCC_READ 0x012C8058 4 0>,
 				 <DCC_READ 0x012e0410 3 0>,
 				 <DCC_READ 0x012e04a0 1 0>,
@@ -3073,7 +3065,6 @@
 				 <DCC_READ 0x012e6418 1 0>,
 				 <DCC_READ 0x01380900 8 0>,
 				 <DCC_READ 0x01380d00 5 0>,
-				 <DCC_READ 0x01350110 4 0>,
 				 <DCC_READ 0x01430280 1 0>,
 				 <DCC_READ 0x01430288 1 0>,
 				 <DCC_READ 0x0143028c 7 0>,
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
index 3412b25d..5939440 100644
--- a/arch/arm64/boot/dts/qcom/smb1355.dtsi
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,98 +12,96 @@
 
 #include <dt-bindings/interrupt-controller/irq.h>
 
-&qupv3_se10_i2c {
-	smb1355_0: qcom,smb1355@8 {
-		compatible = "qcom,i2c-pmic";
-		reg = <0x8>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-parent = <&spmi_bus>;
-		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
-		interrupt_names = "smb1355_0";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		qcom,periph-map = <0x10 0x12 0x13 0x16>;
+smb1355_0: qcom,smb1355@8 {
+	compatible = "qcom,i2c-pmic";
+	reg = <0x8>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt_names = "smb1355_0";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10 0x12 0x13 0x16>;
 
-		smb1355_revid_0: qcom,revid@100 {
-			compatible = "qcom,qpnp-revid";
-			reg = <0x100 0x100>;
-		};
-
-		smb1355_charger_0: qcom,smb1355-charger@1000 {
-			compatible = "qcom,smb1355";
-			qcom,pmic-revid = <&smb1355_revid_0>;
-			reg = <0x1000 0x700>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			interrupt-parent = <&smb1355_0>;
-			status = "disabled";
-
-			io-channels = <&pmi8998_rradc 2>,
-				      <&pmi8998_rradc 12>;
-			io-channel-names = "charger_temp",
-					   "charger_temp_max";
-
-			qcom,chgr@1000 {
-				reg = <0x1000 0x100>;
-				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "chg-state-change";
-			};
-
-			qcom,chgr-misc@1600 {
-				reg = <0x1600 0x100>;
-				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
-					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "wdog-bark",
-						  "temperature-change";
-			};
-		};
+	smb1355_revid_0: qcom,revid@100 {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100 0x100>;
 	};
 
-	smb1355_1: qcom,smb1355@c {
-		compatible = "qcom,i2c-pmic";
-		reg = <0xc>;
+	smb1355_charger_0: qcom,smb1355-charger@1000 {
+		compatible = "qcom,smb1355";
+		qcom,pmic-revid = <&smb1355_revid_0>;
+		reg = <0x1000 0x700>;
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupt-parent = <&spmi_bus>;
-		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
-		interrupt_names = "smb1355_1";
-		interrupt-controller;
-		#interrupt-cells = <3>;
-		qcom,periph-map = <0x10 0x12 0x13 0x16>;
+		#size-cells = <1>;
+		interrupt-parent = <&smb1355_0>;
+		status = "disabled";
 
-		smb1355_revid_1: qcom,revid@100 {
-			compatible = "qcom,qpnp-revid";
-			reg = <0x100 0x100>;
+		io-channels = <&pmi8998_rradc 2>,
+			      <&pmi8998_rradc 12>;
+		io-channel-names = "charger_temp",
+				   "charger_temp_max";
+
+		qcom,chgr@1000 {
+			reg = <0x1000 0x100>;
+			interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "chg-state-change";
 		};
 
-		smb1355_charger_1: qcom,smb1355-charger@1000 {
-			compatible = "qcom,smb1355";
-			qcom,pmic-revid = <&smb1355_revid_1>;
-			reg = <0x1000 0x700>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			interrupt-parent = <&smb1355_1>;
-			status = "disabled";
+		qcom,chgr-misc@1600 {
+			reg = <0x1600 0x100>;
+			interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog-bark",
+					  "temperature-change";
+		};
+	};
+};
 
-			io-channels = <&pmi8998_rradc 2>,
-				      <&pmi8998_rradc 12>;
-			io-channel-names = "charger_temp",
-					   "charger_temp_max";
+smb1355_1: qcom,smb1355@c {
+	compatible = "qcom,i2c-pmic";
+	reg = <0xc>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt_names = "smb1355_1";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10 0x12 0x13 0x16>;
 
-			qcom,chgr@1000 {
-				reg = <0x1000 0x100>;
-				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "chg-state-change";
-			};
+	smb1355_revid_1: qcom,revid@100 {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100 0x100>;
+	};
 
-			qcom,chgr-misc@1600 {
-				reg = <0x1600 0x100>;
-				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
-					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
-				interrupt-names = "wdog-bark",
-						  "temperature-change";
-			};
+	smb1355_charger_1: qcom,smb1355-charger@1000 {
+		compatible = "qcom,smb1355";
+		qcom,pmic-revid = <&smb1355_revid_1>;
+		reg = <0x1000 0x700>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupt-parent = <&smb1355_1>;
+		status = "disabled";
+
+		io-channels = <&pmi8998_rradc 2>,
+			      <&pmi8998_rradc 12>;
+		io-channel-names = "charger_temp",
+				   "charger_temp_max";
+
+		qcom,chgr@1000 {
+			reg = <0x1000 0x100>;
+			interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "chg-state-change";
+		};
+
+		qcom,chgr-misc@1600 {
+			reg = <0x1600 0x100>;
+			interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "wdog-bark",
+					  "temperature-change";
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/smb1390.dtsi b/arch/arm64/boot/dts/qcom/smb1390.dtsi
new file mode 100644
index 0000000..92ac103
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/smb1390.dtsi
@@ -0,0 +1,61 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+smb1390: qcom,smb1390@10 {
+	compatible = "qcom,i2c-pmic";
+	reg = <0x10>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupt-parent = <&spmi_bus>;
+	interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+	interrupt-names = "smb1390";
+	interrupt-controller;
+	#interrupt-cells = <3>;
+	qcom,periph-map = <0x10>;
+
+	smb1390_revid: qcom,revid {
+		compatible = "qcom,qpnp-revid";
+		reg = <0x100>;
+	};
+
+	smb1390_charger: qcom,charge_pump {
+		compatible = "qcom,smb1390-charger";
+		qcom,pmic-revid = <&smb1390_revid>;
+		interrupt-parent = <&smb1390>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&smb1390_die_temp_default>;
+		qcom,smb-vadc = <&pm8998_vadc>;
+		qcom,channel-num = <0x14>;
+		status = "disabled";
+
+		qcom,core {
+			interrupts = <0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x4 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x6 IRQ_TYPE_EDGE_RISING>,
+				     <0x10 0x7 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "switcher-off-window",
+					  "switcher-off-fault",
+					  "tsd-fault",
+					  "irev-fault",
+					  "vph-ov-hard",
+					  "vph-ov-soft",
+					  "ilim",
+					  "temp-alarm";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp-overlay.dts
new file mode 100644
index 0000000..c3aa763
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp-overlay.dts
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dts
new file mode 100644
index 0000000..6237873
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-cdp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dtsi
new file mode 100644
index 0000000..d4d42c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-cdp.dtsi
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc-cdp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
new file mode 100644
index 0000000..e90f3b4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <1 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
new file mode 100644
index 0000000..76c424d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec CDP";
+	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
+	qcom,board-id = <1 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
new file mode 100644
index 0000000..946298f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <8 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
new file mode 100644
index 0000000..e53bbe3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 Ext. Audio Codec MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,board-id = <8 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp-overlay.dts
new file mode 100644
index 0000000..8af46ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp-overlay.dts
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sxr1120-lc-mtp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,msm-id = <370 0x0>;
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dts
new file mode 100644
index 0000000..ffcdeda
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+#include "sxr1120-lc-mtp.dtsi"
+#include "qcs605-lc-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC Groot+PM8005 MTP";
+	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dtsi
new file mode 100644
index 0000000..270aa0e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-mtp.dtsi
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc-mtp.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc.dts
new file mode 100644
index 0000000..5967388
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sxr1120-lc.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 LC SoC";
+	compatible = "qcom,sxr1120";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
new file mode 100644
index 0000000..1413e2c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qcs605-lc.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SXR1120 SoC";
+	compatible = "qcom,sxr1120";
+	qcom,msm-id = <370 0x0>;
+};
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 1a31ee3..6aa47eb 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -18,6 +18,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -102,6 +103,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -113,6 +115,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -243,6 +246,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -307,6 +311,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -341,9 +349,9 @@
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -365,7 +373,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
@@ -385,9 +395,7 @@
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
 CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
 CONFIG_MSM_CAMERA_SENSOR=y
 CONFIG_MSM_CPP=y
 CONFIG_MSM_CCI=y
@@ -556,7 +564,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -569,12 +576,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index 2bcdc2c..02e4d68 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -19,6 +19,7 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -106,6 +107,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -117,6 +119,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -249,6 +252,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -313,6 +317,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -349,9 +357,9 @@
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -373,7 +381,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
@@ -553,7 +563,6 @@
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_DEBUG_LAR_UNLOCK=y
@@ -574,7 +583,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
@@ -587,12 +595,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
@@ -634,7 +644,6 @@
 CONFIG_DEBUG_OBJECTS_WORK=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 8fece0ee..9ac93aa 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -18,6 +18,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -103,6 +104,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -114,6 +116,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -342,9 +345,9 @@
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -366,7 +369,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
@@ -541,6 +546,7 @@
 CONFIG_MSM_RMNET_BAM=y
 CONFIG_MSM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
@@ -579,12 +585,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 0eb9df4..af1bd0e 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -19,6 +19,7 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -107,6 +108,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
@@ -118,6 +120,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -351,9 +354,9 @@
 CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
@@ -375,7 +378,9 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR=y
@@ -552,6 +557,7 @@
 CONFIG_MSM_RMNET_BAM=y
 CONFIG_MSM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
@@ -565,7 +571,6 @@
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_DEBUG_LAR_UNLOCK=y
@@ -599,12 +604,14 @@
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 8e7c369..956bcc5 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -103,9 +103,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -114,6 +116,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -302,7 +305,6 @@
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
 # CONFIG_DEVKMEM is not set
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 5fb8fdb..f6895a5 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -108,9 +108,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -119,6 +121,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -309,7 +312,6 @@
 CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index fe5b5b5..e35e571 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -83,6 +83,7 @@
 # CONFIG_PM_WAKELOCKS_GC is not set
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -102,9 +103,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -113,6 +116,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -549,6 +553,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -572,8 +577,11 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -605,6 +613,7 @@
 CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 666f350..ab983e0 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -86,6 +86,7 @@
 CONFIG_PM_DEBUG=y
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -105,9 +106,11 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -116,6 +119,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_NETFILTER=y
@@ -466,7 +470,6 @@
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_EDAC_KRYO3XX_ARM64=y
-CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE=y
 CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
@@ -568,6 +571,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_BIMC_BWMON=y
@@ -593,8 +597,11 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_F2FS_FS=y
 CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -670,6 +677,7 @@
 CONFIG_CORESIGHT_TGU=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 28196b1..fd38814 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -27,6 +27,7 @@
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += qrwlock.h
 generic-y += rwsem.h
 generic-y += segment.h
 generic-y += sembuf.h
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index c4cc771..f8b5c48 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -228,6 +228,7 @@
 }
 
 #define gic_read_typer(c)		readq_relaxed_no_log(c)
+#define gic_read_irouter(c)		readq_relaxed_no_log(c)
 #define gic_write_irouter(v, c)		writeq_relaxed_no_log(v, c)
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index eaa5bbe..f95235a 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -148,8 +148,17 @@
 
 static inline u64 arch_counter_get_cntvct(void)
 {
+	u64 cval;
 	isb();
-	return arch_timer_reg_read_stable(cntvct_el0);
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define L32_BITS       0x00000000FFFFFFFF
+	do {
+		cval = arch_timer_reg_read_stable(cntvct_el0);
+	} while ((cval & L32_BITS) == L32_BITS);
+#else
+		cval = arch_timer_reg_read_stable(cntvct_el0);
+#endif
+	return cval;
 }
 
 static inline int arch_timer_arch_init(void)
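Editor's note: the hunk above makes arch_counter_get_cntvct() discard any counter sample whose low 32 bits read back as all ones when CONFIG_MSM_TIMER_LEAP is enabled. As a standalone illustration of the retry pattern (a minimal sketch, not part of the patch; read_counter_hw() is an invented stand-in for the CNTVCT_EL0 read):

#include <stdint.h>

/* Hypothetical platform hook standing in for the CNTVCT_EL0 read. */
extern uint64_t read_counter_hw(void);

static uint64_t read_counter_filtered(void)
{
	const uint64_t low32 = 0x00000000FFFFFFFFULL;
	uint64_t cval;

	/*
	 * Re-read while the low word is all ones, the pattern the
	 * affected timer can transiently report. A healthy counter
	 * hits that value legitimately only once per 2^32 ticks, so
	 * the loop almost always exits on the first iteration.
	 */
	do {
		cval = read_counter_hw();
	} while ((cval & low32) == low32);

	return cval;
}
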
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c5bc52e..a891bb6 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,20 +48,9 @@
 } while (0)
 
 static inline int
-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *_uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (int)(encoded_op << 8) >> 20;
-	int cmparg = (int)(encoded_op << 20) >> 20;
 	int oldval = 0, ret, tmp;
-	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
-
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1U << (oparg & 0x1f);
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -92,17 +81,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
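Editor's note: with this change the arm64 helper only performs the atomic operation and reports the old value; decoding encoded_op and evaluating the comparison move to common code. A rough sketch of that caller, reconstructed from the lines deleted above (the exact generic implementation differs in detail, e.g. in how it bounds the shift amount), looks like:

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

static int futex_atomic_op_inuser_sketch(unsigned int encoded_op,
					 u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (int)(encoded_op << 8) >> 20;
	int cmparg = (int)(encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1U << (oparg & 0x1f);

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* arch hook only performs the atomic op and returns the old value */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval < cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval > cmparg;
	default: return -ENOSYS;
	}
}
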
 
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 5c0c57e..388a0ca 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -167,9 +167,9 @@
 #define readq_relaxed_no_log(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
 
 #define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
-#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le32(v), (c)))
+#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
 #define writel_relaxed_no_log(v, c)	((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
-#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
 
 /*
  * I/O memory access primitives. Reads are ordered relative to any
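Editor's note: the writew/writeq fix above replaces a 32-bit byte-swap helper with the correctly sized ones. For the writeq case, a user-space sketch (editor's illustration; the *_stub helpers stand in for the kernel's cpu_to_le*() on a little-endian build, where they reduce to plain casts) shows why routing a 64-bit value through cpu_to_le32() corrupts the write:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for cpu_to_le32()/cpu_to_le64() on a little-endian CPU. */
static uint32_t cpu_to_le32_stub(uint32_t v) { return v; }
static uint64_t cpu_to_le64_stub(uint64_t v) { return v; }

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;

	/* old macro body: the value is truncated to 32 bits first */
	uint64_t old_write = (uint64_t)cpu_to_le32_stub((uint32_t)val);
	/* fixed macro body: the full 64-bit value survives */
	uint64_t new_write = cpu_to_le64_stub(val);

	printf("old: %#llx\n", (unsigned long long)old_write); /* 0x55667788 */
	printf("new: %#llx\n", (unsigned long long)new_write); /* 0x1122334455667788 */
	return 0;
}
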
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 37d56e8..0a33ea3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -73,6 +73,9 @@
 
 	/* Timer */
 	struct arch_timer_kvm	timer;
+
+	/* Mandated version of PSCI */
+	u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 36d2aba..24a8369 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -313,6 +313,22 @@
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu.h>
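Editor's note: kvm_read_guest_lock() simply brackets kvm_read_guest() with an SRCU read-side section, so callers that are not already inside the kvm->srcu critical section stay safe against concurrent memslot updates. A hypothetical call site (names invented for illustration) would look like:

/* Copy one 64-bit guest table entry while holding no SRCU lock of our
 * own; the wrapper takes and drops kvm->srcu around the access. */
static int read_guest_table_entry(struct kvm *kvm, gpa_t gpa, u64 *entry)
{
	return kvm_read_guest_lock(kvm, gpa, entry, sizeof(*entry));
}
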
 
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 55082cc..53c0f54 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -140,8 +140,8 @@
 	"	cbnz	%w1, 1f\n"
 	"	add	%w1, %w0, %3\n"
 	"	casa	%w0, %w1, %2\n"
-	"	and	%w1, %w1, #0xffff\n"
-	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"	sub	%w1, %w1, %3\n"
+	"	eor	%w1, %w1, %w0\n"
 	"1:")
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	: "I" (1 << TICKET_SHIFT)
@@ -186,169 +186,7 @@
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	sevl\n"
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	mov	%w0, wzr\n"
-	"2:	casa	%w0, %w2, %1\n"
-	"	cbz	%w0, 3f\n"
-	"	ldxr	%w0, %1\n"
-	"	cbz	%w0, 2b\n"
-	"	wfe\n"
-	"	b	1b\n"
-	"3:")
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 2f\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	mov	%w0, wzr\n"
-	"	casa	%w0, %w2, %1\n"
-	__nops(2))
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-
-	return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	"	stlr	wzr, %0",
-	"	swpl	wzr, wzr, %0")
-	: "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(
-	"	sevl\n"
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1b\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	wfe\n"
-	"2:	ldxr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1b\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w0, %w1, %w0\n"
-	"	cbnz	%w0, 2b")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldxr	%w0, %2\n"
-	"	sub	%w0, %w0, #1\n"
-	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b",
-	/* LSE atomics */
-	"	movn	%w0, #0\n"
-	"	staddl	%w0, %2\n"
-	__nops(2))
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	mov	%w1, #1\n"
-	"1:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 2f\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	ldr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1f\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w1, %w1, %w0\n"
-	__nops(1)
-	"1:")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-
-	return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#include <asm/qrwlock.h>
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
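Editor's note: dropping the hand-written LL/SC and LSE rwlock assembly in favour of <asm/qrwlock.h> is invisible to callers; the generic queued rwlock supplies arch_read_lock()/arch_write_lock() and friends, so ordinary rwlock users build and behave the same while gaining the fairness properties of the queued implementation. Minimal usage sketch (editor's example, not from the patch):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);
static int example_value;

/* Readers may run concurrently; the queued implementation keeps a
 * steady stream of readers from starving a waiting writer. */
static int example_read(void)
{
	int v;

	read_lock(&example_lock);
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

static void example_write(int v)
{
	write_lock(&example_lock);
	example_value = v;
	write_unlock(&example_lock);
}
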
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a..6b85601 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 , 0 }
 
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qrwlock_types.h>
 
 #endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 801a16db..7d2a15a 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -23,7 +23,7 @@
 	unsigned long sp;
 	unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	unsigned int graph;
+	int graph;
 #endif
 };
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6c35d21..de21caa 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -312,7 +312,7 @@
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),	\
+		__get_user_asm("ldr", "ldtr", "%x",  __gu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
@@ -384,7 +384,7 @@
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	case 8:								\
-		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
+		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
 			       (err), ARM64_HAS_UAO);			\
 		break;							\
 	default:							\
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86..702de7a 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 49e548f..e7908c9 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -177,7 +177,7 @@
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_hvc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_hvc_start;
@@ -187,7 +187,7 @@
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
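Editor's note: the two hunks above matter because the SMCCC ARCH_FEATURES query reports "not implemented" with a negative value (SMCCC_RET_NOT_SUPPORTED is -1), while zero or a positive value still indicates a usable ARCH_WORKAROUND_1 call. The old "if (res.a0)" test treated any non-zero answer as a failure and could wrongly skip installing the workaround. A small sketch of the intended check (illustrative, not a quote of the driver):

#include <linux/arm-smccc.h>

/* Negative => SMCCC_ARCH_WORKAROUND_1 is not implemented by the
 * firmware; zero or positive => the workaround call is available. */
static bool workaround_1_available(const struct arm_smccc_res *res)
{
	return (int)res->a0 >= 0;
}
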
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index c9a2ab4..f035ff6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
 
 void *module_alloc(unsigned long size)
 {
+	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;
 
+	/* Silence the initial allocation */
+	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+		gfp_mask |= __GFP_NOWARN;
+
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
 				module_alloc_base + MODULES_VSIZE,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+				gfp_mask, PAGE_KERNEL_EXEC, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 623dd48..69d3266 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -602,16 +602,6 @@
 void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 DEFINE_PER_CPU(bool, pending_ipi);
 
-void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
-{
-	unsigned int cpu;
-
-	for_each_cpu(cpu, cpumask)
-		per_cpu(pending_ipi, cpu) = true;
-
-	__smp_cross_call(cpumask, func);
-}
-
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
@@ -779,6 +769,17 @@
 	__smp_cross_call(target, ipinr);
 }
 
+static void smp_cross_call_common(const struct cpumask *cpumask,
+				  unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	smp_cross_call(cpumask, func);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -825,7 +826,8 @@
 void arch_irq_work_raise(void)
 {
 	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+		smp_cross_call_common(cpumask_of(smp_processor_id()),
+				      IPI_IRQ_WORK);
 }
 #endif
 
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index f7ce3d2..4fa6d84 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -73,6 +73,11 @@
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
 			(frame->pc == (unsigned long)return_to_handler)) {
+		if (WARN_ON_ONCE(frame->graph == -1))
+			return -EINVAL;
+		if (frame->graph < -1)
+			frame->graph += FTRACE_NOTRACE_DEPTH;
+
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 5977969..5d9076e 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -53,7 +53,7 @@
 	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = -1; /* no task info */
+	frame.graph = current->curr_ret_stack;
 #endif
 	do {
 		int ret = unwind_frame(NULL, &frame);
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c39872a..2bd7426 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -66,7 +66,16 @@
 	.macro	get_clock_shifted_nsec res, cycle_last, mult
 	/* Read the virtual counter. */
 	isb
+#if IS_ENABLED(CONFIG_MSM_TIMER_LEAP)
+#define LEAST_32BITS	0x00000000FFFFFFFF
+9999:
+	mrs x_tmp, cntvct_el0
+	and \res, x_tmp, #LEAST_32BITS
+	eor \res, \res, #LEAST_32BITS
+	cbz \res, 9999b
+#else
 	mrs	x_tmp, cntvct_el0
+#endif
 	/* Calculate cycle delta and convert to ns. */
 	sub	\res, x_tmp, \cycle_last
 	/* We can only guarantee 56 bits of precision. */
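
The CONFIG_MSM_TIMER_LEAP block added above re-reads CNTVCT_EL0 for as long as the low 32 bits of the returned value are all ones: the and/eor pair leaves zero in \res exactly in that case, so the cbz loops back to the mrs. A C rendition of the same loop (read_cntvct() is an assumed stand-in for the mrs, not a real API):

    #include <stdint.h>

    #define LEAST_32BITS 0x00000000FFFFFFFFULL

    extern uint64_t read_cntvct(void);  /* assumed helper standing in for "mrs xN, cntvct_el0" */

    static uint64_t read_cntvct_no_leap(void)
    {
        uint64_t cnt;

        /* Retry while the low word is 0xffffffff, i.e. just before it rolls over. */
        do {
            cnt = read_cntvct();
        } while ((cnt & LEAST_32BITS) == LEAST_32BITS);

        return cnt;
    }
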
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3f9e157..d3e0a2f 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
@@ -205,7 +206,7 @@
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
-                + NUM_TIMER_REGS;
+		+ kvm_arm_get_fw_num_regs(vcpu)	+ NUM_TIMER_REGS;
 }
 
 /**
@@ -225,6 +226,11 @@
 		uindices++;
 	}
 
+	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += kvm_arm_get_fw_num_regs(vcpu);
+
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret)
 		return ret;
@@ -243,6 +249,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_get_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
 
@@ -259,6 +268,9 @@
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+		return kvm_arm_set_fw_reg(vcpu, reg);
+
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
 
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 4bea27f..2702bd8 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr);
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9..37f7b2b 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@
 /*
  * do the futex operations
  */
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -225,18 +215,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS; break;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
 
 	return ret;
 
-} /* end futex_atomic_op_inuser() */
+} /* end arch_futex_atomic_op_inuser() */
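
This frv change is the template for the futex conversions that follow (hexagon, ia64, microblaze, mips, parisc, powerpc, s390): each per-architecture helper keeps only the user-space atomic operation, while the operand decoding, the access_ok() check and the final cmp/cmparg comparison move into a single copy in the common futex code, which then calls the new arch_futex_atomic_op_inuser() hook. Reassembled from the removed lines (a sketch; the exact kernel/futex.c version differs in details such as sign extension), the common caller is essentially:

    static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
    {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
        int oldval, ret;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
            oparg = 1 << oparg;

        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
            return -EFAULT;

        /* Architecture does only the atomic op and reports the old value. */
        ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
        if (ret)
            return ret;

        switch (cmp) {
        case FUTEX_OP_CMP_EQ: return oldval == cmparg;
        case FUTEX_OP_CMP_NE: return oldval != cmparg;
        case FUTEX_OP_CMP_LT: return oldval <  cmparg;
        case FUTEX_OP_CMP_GE: return oldval >= cmparg;
        case FUTEX_OP_CMP_LE: return oldval <= cmparg;
        case FUTEX_OP_CMP_GT: return oldval >  cmparg;
        default:              return -ENOSYS;
        }
    }
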
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8..c607b77 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
 
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -72,30 +63,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd..6d67dc1 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -84,17 +75,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 5ed0ea9..f851c9d 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
+	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
index a0fc0c1..3e8be0f 100644
--- a/arch/m68k/coldfire/device.c
+++ b/arch/m68k/coldfire/device.c
@@ -135,7 +135,11 @@
 	.id			= 0,
 	.num_resources		= ARRAY_SIZE(mcf_fec0_resources),
 	.resource		= mcf_fec0_resources,
-	.dev.platform_data	= FEC_PDATA,
+	.dev = {
+		.dma_mask		= &mcf_fec0.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= FEC_PDATA,
+	}
 };
 
 #ifdef MCFFEC_BASE1
@@ -167,7 +171,11 @@
 	.id			= 1,
 	.num_resources		= ARRAY_SIZE(mcf_fec1_resources),
 	.resource		= mcf_fec1_resources,
-	.dev.platform_data	= FEC_PDATA,
+	.dev = {
+		.dma_mask		= &mcf_fec1.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= FEC_PDATA,
+	}
 };
 #endif /* MCFFEC_BASE1 */
 #endif /* CONFIG_FEC */
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f0..a9dad9e 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
 })
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -66,30 +57,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2d2fd79..34fbbf8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -95,6 +95,7 @@
 	select PCI_DRIVERS_GENERIC
 	select PINCTRL
 	select SMP_UP if SMP
+	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R6
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 6ed1ded..6420c83 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2271,7 +2271,7 @@
 
 	parent_irq = irq_of_parse_and_map(ciu_node, 0);
 	if (!parent_irq) {
-		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
+		pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
 			ciu_node->name);
 		return -EINVAL;
 	}
@@ -2283,7 +2283,7 @@
 
 	addr = of_get_address(ciu_node, 0, NULL, NULL);
 	if (!addr) {
-		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
+		pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
 		return -EINVAL;
 	}
 	host_data->raw_reg = (u64)phys_to_virt(
@@ -2291,7 +2291,7 @@
 
 	addr = of_get_address(ciu_node, 1, NULL, NULL);
 	if (!addr) {
-		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
+		pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
 		return -EINVAL;
 	}
 	host_data->en_reg = (u64)phys_to_virt(
@@ -2299,7 +2299,7 @@
 
 	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
 	if (r) {
-		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
+		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
 			ciu_node->name);
 		return r;
 	}
@@ -2309,7 +2309,7 @@
 					   &octeon_irq_domain_cib_ops,
 					   host_data);
 	if (!cib_domain) {
-		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
+		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
 		return -ENOMEM;
 	}
 
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190b..a9e61ea 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
 }
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -125,17 +116,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index aa3800c..d99ca86 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -167,7 +167,7 @@
 #define AR71XX_AHB_DIV_MASK		0x7
 
 #define AR724X_PLL_REG_CPU_CONFIG	0x00
-#define AR724X_PLL_REG_PCIE_CONFIG	0x18
+#define AR724X_PLL_REG_PCIE_CONFIG	0x10
 
 #define AR724X_PLL_FB_SHIFT		0
 #define AR724X_PLL_FB_MASK		0x3ff
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
index 6b444cd..db930cd 100644
--- a/arch/mips/include/asm/machine.h
+++ b/arch/mips/include/asm/machine.h
@@ -52,7 +52,7 @@
 	if (!mach->matches)
 		return NULL;
 
-	for (match = mach->matches; match->compatible; match++) {
+	for (match = mach->matches; match->compatible[0]; match++) {
 		if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
 			return match;
 	}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0c8ae2c..8f7bf74 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -483,7 +483,7 @@
 /*
  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR and FIR registers separately.
  */
 static int fpr_get(struct task_struct *target,
 		   const struct user_regset *regset,
@@ -491,6 +491,7 @@
 		   void *kbuf, void __user *ubuf)
 {
 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	const int fir_pos = fcr31_pos + sizeof(u32);
 	int err;
 
 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
@@ -503,6 +504,12 @@
 	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &target->thread.fpu.fcr31,
 				  fcr31_pos, fcr31_pos + sizeof(u32));
+	if (err)
+		return err;
+
+	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &boot_cpu_data.fpu_id,
+				  fir_pos, fir_pos + sizeof(u32));
 
 	return err;
 }
@@ -551,7 +558,8 @@
 /*
  * Copy the supplied NT_PRFPREG buffer to the floating-point context.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR register separately.  Ignore the incoming FIR register
+ * contents though, as the register is read-only.
  *
  * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
  * which is supposed to have been guaranteed by the kernel before
@@ -565,6 +573,7 @@
 		   const void *kbuf, const void __user *ubuf)
 {
 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	const int fir_pos = fcr31_pos + sizeof(u32);
 	u32 fcr31;
 	int err;
 
@@ -592,6 +601,11 @@
 		ptrace_setfcr31(target, fcr31);
 	}
 
+	if (count > 0)
+		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						fir_pos,
+						fir_pos + sizeof(u32));
+
 	return err;
 }
 
@@ -813,7 +827,7 @@
 			fregs = get_fpu_regs(child);
 
 #ifdef CONFIG_32BIT
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
@@ -902,7 +916,7 @@
 
 			init_fp_ctx(child);
 #ifdef CONFIG_32BIT
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
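
For reference, the fpr_get()/fpr_set() changes earlier in this file extend the NT_PRFPREG regset handling to the layout "32 FP registers, then FCSR, then FIR": FIR is exported from boot_cpu_data.fpu_id on read and discarded on write because the register is read-only. Assuming a 64-bit elf_fpreg_t and NUM_FPU_REGS == 32, the offsets used above work out as:

    fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t) = 32 * 8 = 256
    fir_pos   = fcr31_pos + sizeof(u32)            = 256 + 4 = 260
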
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 5fcbdcd..bc9afba 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -97,7 +97,7 @@
 				break;
 			}
 			fregs = get_fpu_regs(child);
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
@@ -204,7 +204,7 @@
 				       sizeof(child->thread.fpu));
 				child->thread.fpu.fcr31 = 0;
 			}
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 29ec9ab..a2c46f5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -42,7 +42,7 @@
 	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
 	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
 	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
 	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
 	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
 	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 9d0107f..43fa682 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -851,9 +851,12 @@
 	/*
 	 * Either no secondary cache or the available caches don't have the
 	 * subset property so we have to flush the primary caches
-	 * explicitly
+	 * explicitly.
+	 * If we would need IPI to perform an INDEX-type operation, then
+	 * we have to use the HIT-type alternative as IPI cannot be used
+	 * here due to interrupts possibly being disabled.
 	 */
-	if (size >= dcache_size) {
+	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
@@ -890,7 +893,7 @@
 		return;
 	}
 
-	if (size >= dcache_size) {
+	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 8b93730..fd26fad 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -186,7 +186,7 @@
 
 #define RBTX4939_MAX_7SEGLEDS	8
 
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
 static u8 led_val[RBTX4939_MAX_7SEGLEDS];
 struct rbtx4939_led_data {
 	struct led_classdev cdev;
@@ -261,7 +261,7 @@
 
 static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
 {
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
 	unsigned long flags;
 	local_irq_save(flags);
 	/* bit7: reserved for LED class */
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index ac8bd58..06a1a88 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,22 +32,12 @@
 }
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
 	unsigned long int flags;
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval, ret;
 	u32 tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
-		return -EFAULT;
-
 	_futex_spin_lock_irqsave(uaddr, &flags);
 	pagefault_disable();
 
@@ -85,17 +75,9 @@
 	pagefault_enable();
 	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
-	if (ret == 0) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9d47f2e..bb69f39 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -92,7 +92,8 @@
 libfdt       := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
 	$(addprefix $(obj)/,$(libfdtheader))
 
 src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf84..f4c7467f 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -31,18 +31,10 @@
 	: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
 	: "cr0", "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -68,17 +60,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
index 744fd54..1bcc849 100644
--- a/arch/powerpc/include/asm/irq_work.h
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -5,5 +5,6 @@
 {
 	return true;
 }
+extern void arch_irq_work_raise(void);
 
 #endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6ef8f0b..27843665 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -207,18 +207,18 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_frozen;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	eeh_disable_irq(dev);
 
 	if (!driver->err_handler ||
-	    !driver->err_handler->error_detected) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    !driver->err_handler->error_detected)
+		goto out;
 
 	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
 
@@ -227,7 +227,10 @@
 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
 
 	edev->in_error = true;
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -250,15 +253,14 @@
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
 
+	device_lock(&dev->dev);
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	if (!driver->err_handler ||
 	    !driver->err_handler->mmio_enabled ||
-	    (edev->mode & EEH_DEV_NO_HANDLER)) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    (edev->mode & EEH_DEV_NO_HANDLER))
+		goto out;
 
 	rc = driver->err_handler->mmio_enabled(dev);
 
@@ -266,7 +268,10 @@
 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -289,20 +294,20 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_normal;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	eeh_enable_irq(dev);
 
 	if (!driver->err_handler ||
 	    !driver->err_handler->slot_reset ||
 	    (edev->mode & EEH_DEV_NO_HANDLER) ||
-	    (!edev->in_error)) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    (!edev->in_error))
+		goto out;
 
 	rc = driver->err_handler->slot_reset(dev);
 	if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -310,7 +315,10 @@
 	if (*res == PCI_ERS_RESULT_DISCONNECT &&
 	     rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -361,10 +369,12 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_normal;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	was_in_error = edev->in_error;
 	edev->in_error = false;
@@ -374,13 +384,15 @@
 	    !driver->err_handler->resume ||
 	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
 		edev->mode &= ~EEH_DEV_NO_HANDLER;
-		eeh_pcid_put(dev);
-		return NULL;
+		goto out;
 	}
 
 	driver->err_handler->resume(dev);
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
@@ -400,22 +412,25 @@
 
 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
 		return NULL;
+
+	device_lock(&dev->dev);
 	dev->error_state = pci_channel_io_perm_failure;
 
 	driver = eeh_pcid_get(dev);
-	if (!driver) return NULL;
+	if (!driver) goto out_no_dev;
 
 	eeh_disable_irq(dev);
 
 	if (!driver->err_handler ||
-	    !driver->err_handler->error_detected) {
-		eeh_pcid_put(dev);
-		return NULL;
-	}
+	    !driver->err_handler->error_detected)
+		goto out;
 
 	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
 
+out:
 	eeh_pcid_put(dev);
+out_no_dev:
+	device_unlock(&dev->dev);
 	return NULL;
 }
 
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f516ac5..bf0f712 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -228,14 +228,6 @@
 	unsigned short maj;
 	unsigned short min;
 
-	/* We only show online cpus: disable preempt (overzealous, I
-	 * knew) to prevent cpu going down. */
-	preempt_disable();
-	if (!cpu_online(cpu_id)) {
-		preempt_enable();
-		return 0;
-	}
-
 #ifdef CONFIG_SMP
 	pvr = per_cpu(cpu_pvr, cpu_id);
 #else
@@ -340,9 +332,6 @@
 #ifdef CONFIG_SMP
 	seq_printf(m, "\n");
 #endif
-
-	preempt_enable();
-
 	/* If this is the last cpu, print the summary */
 	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
 		show_cpuinfo_summary(m);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 218cba2..0a2b247 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3107,15 +3107,17 @@
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
 	up_read(&current->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
 	/* Update VRMASD field in the LPCR */
 	senc = slb_pgsize_encoding(psize);
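
The change above in book3s_hv.c stops rejecting backing page sizes other than 4K/64K/16M and instead rounds psize down to the largest size the VRMA supports before computing porder, for example:

    psize = 0x2000000 (32M backing pages) -> clamped to 0x1000000 (16M)
    psize = 0x0200000 (2M backing pages)  -> clamped to 0x0010000 (64K)
    psize = 0x0004000 (16K backing pages) -> clamped to 0x0001000 (4K)
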
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 55fbc0c..79a180c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -299,7 +299,6 @@
 	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -1023,6 +1022,7 @@
 
 secondary_too_late:
 	li	r12, 0
+	stw	r12, STACK_SLOT_TRAP(r1)
 	cmpdi	r4, 0
 	beq	11f
 	stw	r12, VCPU_TRAP(r4)
@@ -1266,12 +1266,12 @@
 	bl	kvmhv_accumulate_time
 #endif
 
+	stw	r12, STACK_SLOT_TRAP(r1)
 	mr 	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	lwz	r12, VCPU_TRAP(r9)
 
 	/* Stop others sending VCPU interrupts to this physical CPU */
 	li	r0, -1
@@ -1549,6 +1549,7 @@
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
 	 * have to coordinate the hardware threads.
+	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
 	 */
 kvmhv_switch_to_host:
 	/* Secondary threads wait for primary to do partition switch */
@@ -1599,11 +1600,11 @@
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* If HMI, call kvmppc_realmode_hmi_handler() */
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	27f
 	bl	kvmppc_realmode_hmi_handler
 	nop
-	li	r12, BOOK3S_INTERRUPT_HMI
 	/*
 	 * At this point kvmppc_realmode_hmi_handler would have resync-ed
 	 * the TB. Hence it is not required to subtract guest timebase
@@ -1678,6 +1679,7 @@
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
+	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
 	ld	r0, SFS+PPC_LR_STKOFF(r1)
 	addi	r1, r1, SFS
 	mtlr	r0
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index a51c188..6cff96e 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -551,7 +551,7 @@
 	nid = of_node_to_nid_single(cpu);
 
 out_present:
-	if (nid < 0 || !node_online(nid))
+	if (nid < 0 || !node_possible(nid))
 		nid = first_online_node;
 
 	map_cpu_to_node(lcpu, nid);
@@ -904,6 +904,32 @@
 	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
 }
 
+static void __init find_possible_nodes(void)
+{
+	struct device_node *rtas;
+	u32 numnodes, i;
+
+	if (min_common_depth <= 0)
+		return;
+
+	rtas = of_find_node_by_path("/rtas");
+	if (!rtas)
+		return;
+
+	if (of_property_read_u32_index(rtas,
+				"ibm,max-associativity-domains",
+				min_common_depth, &numnodes))
+		goto out;
+
+	for (i = 0; i < numnodes; i++) {
+		if (!node_possible(i))
+			node_set(i, node_possible_map);
+	}
+
+out:
+	of_node_put(rtas);
+}
+
 void __init initmem_init(void)
 {
 	int nid, cpu;
@@ -917,12 +943,15 @@
 	memblock_dump_all();
 
 	/*
-	 * Reduce the possible NUMA nodes to the online NUMA nodes,
-	 * since we do not support node hotplug. This ensures that  we
-	 * lower the maximum NUMA node ID to what is actually present.
+	 * Modify the set of possible NUMA nodes to reflect information
+	 * available about the set of online nodes, and the set of nodes
+	 * that we expect to make use of for this platform's affinity
+	 * calculations.
 	 */
 	nodes_and(node_possible_map, node_possible_map, node_online_map);
 
+	find_possible_nodes();
+
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
 
@@ -1274,6 +1303,40 @@
 	return rc;
 }
 
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	int new_nid;
+
+	/* Use associativity from first thread for all siblings */
+	vphn_get_associativity(cpu, associativity);
+	new_nid = associativity_to_nid(associativity);
+	if (new_nid < 0 || !node_possible(new_nid))
+		new_nid = first_online_node;
+
+	if (NODE_DATA(new_nid) == NULL) {
+#ifdef CONFIG_MEMORY_HOTPLUG
+		/*
+		 * Need to ensure that NODE_DATA is initialized for a node from
+		 * available memory (see memblock_alloc_try_nid). If unable to
+		 * init the node, then default to nearest node that has memory
+		 * installed.
+		 */
+		if (try_online_node(new_nid))
+			new_nid = first_online_node;
+#else
+		/*
+		 * Default to using the nearest node that has memory installed.
+		 * Otherwise, it would be necessary to patch the kernel MM code
+		 * to deal with more memoryless-node error conditions.
+		 */
+		new_nid = first_online_node;
+#endif
+	}
+
+	return new_nid;
+}
+
 /*
  * Update the CPU maps and sysfs entries for a single CPU when its NUMA
  * characteristics change. This function doesn't perform any locking and is
@@ -1339,7 +1402,6 @@
 {
 	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
-	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	cpumask_t updated_cpus;
 	struct device *dev;
 	int weight, new_nid, i = 0;
@@ -1374,11 +1436,7 @@
 			continue;
 		}
 
-		/* Use associativity from first thread for all siblings */
-		vphn_get_associativity(cpu, associativity);
-		new_nid = associativity_to_nid(associativity);
-		if (new_nid < 0 || !node_online(new_nid))
-			new_nid = first_online_node;
+		new_nid = find_and_online_cpu_nid(cpu);
 
 		if (new_nid == numa_cpu_lookup_table[cpu]) {
 			cpumask_andnot(&cpu_associativity_changes_mask,
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 7e706f3..9c58194 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -329,6 +329,9 @@
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
 			break;
+		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+			PPC_LWZ_OFFS(r_A, r_skb, K);
+			break;
 		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
 			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
 			break;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index bf94962..771edff 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -448,6 +448,16 @@
 				/* invalid entry */
 				continue;
 
+			/*
+			 * BHRB rolling buffer could very much contain the kernel
+			 * addresses at this point. Check the privileges before
+			 * exporting it to userspace (avoid exposure of regions
+			 * where we could have speculative execution)
+			 */
+			if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
+				is_kernel_addr(addr))
+				continue;
+
 			/* Branches are read most recent first (ie. mfbhrb 0 is
 			 * the most recent branch).
 			 * There are two types of valid entries:
@@ -1188,6 +1198,7 @@
 		 */
 		write_mmcr0(cpuhw, val);
 		mb();
+		isync();
 
 		/*
 		 * Disable instruction sampling if it was enabled
@@ -1196,12 +1207,26 @@
 			mtspr(SPRN_MMCRA,
 			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 			mb();
+			isync();
 		}
 
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
 
 		ebb_switch_out(mmcr0);
+
+#ifdef CONFIG_PPC64
+		/*
+		 * These are readable by userspace, may contain kernel
+		 * addresses and are not switched by context switch, so clear
+		 * them now to avoid leaking anything to userspace in general
+		 * including to another process.
+		 */
+		if (ppmu->flags & PPMU_ARCH_207S) {
+			mtspr(SPRN_SDAR, 0);
+			mtspr(SPRN_SIAR, 0);
+		}
+#endif
 	}
 
 	local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 1bceb95..5584247 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -44,6 +44,10 @@
 	return count;
 }
 
+/*
+ * This can be called in the panic path with interrupts off, so use
+ * mdelay in that case.
+ */
 static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 {
 	s64 rc = OPAL_BUSY;
@@ -58,10 +62,16 @@
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_write_nvram(__pa(buf), count, off);
 		if (rc == OPAL_BUSY_EVENT) {
-			msleep(OPAL_BUSY_DELAY_MS);
+			if (in_interrupt() || irqs_disabled())
+				mdelay(OPAL_BUSY_DELAY_MS);
+			else
+				msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
 		} else if (rc == OPAL_BUSY) {
-			msleep(OPAL_BUSY_DELAY_MS);
+			if (in_interrupt() || irqs_disabled())
+				mdelay(OPAL_BUSY_DELAY_MS);
+			else
+				msleep(OPAL_BUSY_DELAY_MS);
 		}
 	}
 
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f886886..aa2a513 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			mdelay(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
-		else if (rc == OPAL_BUSY)
-			mdelay(10);
+		} else if (rc == OPAL_BUSY) {
+			mdelay(OPAL_BUSY_DELAY_MS);
+		}
 	}
 	if (rc != OPAL_SUCCESS)
 		return 0;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b9aac95..f37567e 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -626,7 +626,7 @@
 	int i;
 	u32 mask = 0;
 
-	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
+	for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
 		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
 	return mask;
 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9aa0d04..1c4a595 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_CPU_DEVICES if !SMP
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
@@ -704,6 +705,51 @@
 
 	  If unsure, say Y.
 
+config KERNEL_NOBP
+	def_bool n
+	prompt "Enable modified branch prediction for the kernel by default"
+	help
+	  If this option is selected the kernel will switch to a modified
+	  branch prediction mode if the firmware interface is available.
+	  The modified branch prediction mode improves the behaviour in
+	  regard to speculative execution.
+
+	  With the option enabled the kernel parameter "nobp=0" or "nospec"
+	  can be used to run the kernel in the normal branch prediction mode.
+
+	  With the option disabled the modified branch prediction mode is
+	  enabled with the "nobp=1" kernel parameter.
+
+	  If unsure, say N.
+
+config EXPOLINE
+	def_bool n
+	prompt "Avoid speculative indirect branches in the kernel"
+	help
+	  Compile the kernel with the expoline compiler options to guard
+	  against kernel-to-user data leaks by avoiding speculative indirect
+	  branches.
+	  Requires a compiler with -mindirect-branch=thunk support for full
+	  protection. The kernel may run slower.
+
+	  If unsure, say N.
+
+choice
+	prompt "Expoline default"
+	depends on EXPOLINE
+	default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+	bool "spectre_v2=off"
+
+config EXPOLINE_AUTO
+	bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+	bool "spectre_v2=on"
+
+endchoice
+
 endmenu
 
 menu "Power Management"
@@ -753,6 +799,7 @@
 config SHARED_KERNEL
 	bool "VM shared kernel support"
 	depends on !JUMP_LABEL
+	depends on !ALTERNATIVES
 	help
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 54e0052..bef67c0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -79,6 +79,16 @@
 cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
 endif
 
+ifdef CONFIG_EXPOLINE
+  ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+    CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+    CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+    CC_FLAGS_EXPOLINE += -mindirect-branch-table
+    export CC_FLAGS_EXPOLINE
+    cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+  endif
+endif
+
 ifdef CONFIG_FUNCTION_TRACER
 # make use of hotpatch feature if the compiler supports it
 cc_hotpatch	:= -mhotpatch=0,3
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 8013989..096affb 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -12,6 +12,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -66,6 +67,8 @@
 
 .previous
 
+	GEN_BR_THUNK %r14
+
 .text
 /*
  * The CRC-32 function(s) use these calling conventions:
@@ -202,6 +205,6 @@
 
 .Ldone:
 	VLGVF	%r2,%v2,3
-	br	%r14
+	BR_EX	%r14
 
 .previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index 17f2504..8dc98c1 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -75,6 +76,7 @@
 
 .previous
 
+	GEN_BR_THUNK %r14
 
 .text
 
@@ -263,6 +265,6 @@
 
 .Ldone:
 	VLGVF	%r2,%v2,2
-	br	%r14
+	BR_EX	%r14
 
 .previous
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
new file mode 100644
index 0000000..955d620
--- /dev/null
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ALTERNATIVE_ASM_H
+#define _ASM_S390_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Check the length of an instruction sequence. The length may not be larger
+ * than 254 bytes and it has to be divisible by 2.
+ */
+.macro alt_len_check start,end
+	.if ( \end - \start ) > 254
+	.error "cpu alternatives does not support instructions blocks > 254 bytes\n"
+	.endif
+	.if ( \end - \start ) % 2
+	.error "cpu alternatives instructions length is odd\n"
+	.endif
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+	.long	\orig_start - .
+	.long	\alt_start - .
+	.word	\feature
+	.byte	\orig_end - \orig_start
+	.byte	\alt_end - \alt_start
+.endm
+
+/*
+ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
+ * for the bulk of the area, possibly followed by a 4-byte and/or
+ * a 2-byte nop if the size of the area is not divisible by 6.
+ */
+.macro alt_pad_fill bytes
+	.fill	( \bytes ) / 6, 6, 0xc0040000
+	.fill	( \bytes ) % 6 / 4, 4, 0x47000000
+	.fill	( \bytes ) % 6 % 4 / 2, 2, 0x0700
+.endm
+
+/*
+ * Fill up @bytes with nops. If the number of bytes is larger
+ * than 6, emit a jg instruction to branch over all nops, then
+ * fill an area of size (@bytes - 6) with nop instructions.
+ */
+.macro alt_pad bytes
+	.if ( \bytes > 0 )
+	.if ( \bytes > 6 )
+	jg	. + \bytes
+	alt_pad_fill \bytes - 6
+	.else
+	alt_pad_fill \bytes
+	.endif
+	.endif
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+	.pushsection .altinstr_replacement,"ax"
+770:	\newinstr
+771:	.popsection
+772:	\oldinstr
+773:	alt_len_check 770b, 771b
+	alt_len_check 772b, 773b
+	alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
+774:	.pushsection .altinstructions,"a"
+	alt_entry 772b, 774b, 770b, 771b, \feature
+	.popsection
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+	.pushsection .altinstr_replacement,"ax"
+770:	\newinstr1
+771:	\newinstr2
+772:	.popsection
+773:	\oldinstr
+774:	alt_len_check 770b, 771b
+	alt_len_check 771b, 772b
+	alt_len_check 773b, 774b
+	.if ( 771b - 770b > 772b - 771b )
+	alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
+	.else
+	alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
+	.endif
+775:	.pushsection .altinstructions,"a"
+	alt_entry 773b, 775b, 770b, 771b,\feature1
+	alt_entry 773b, 775b, 771b, 772b,\feature2
+	.popsection
+.endm
+
+#endif	/*  __ASSEMBLY__  */
+
+#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 0000000..a720020
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+	s32 instr_offset;	/* original instruction */
+	s32 repl_offset;	/* offset to replacement instruction */
+	u16 facility;		/* facility bit set for replacement */
+	u8  instrlen;		/* length of original instruction */
+	u8  replacementlen;	/* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661:       |662:	  |6620      |663:
+ * +-----------+---------------------+
+ * | oldinstr  | oldinstr_padding    |
+ * |	       +----------+----------+
+ * |	       |	  |	     |
+ * |	       | >6 bytes |6/4/2 nops|
+ * |	       |6 bytes jg----------->
+ * +-----------+---------------------+
+ *		 ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641:			     |6651:
+ * | alternative instr 1	     |
+ * +-----------+---------+- - - - - -+
+ * |6642:		 |6652:      |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ *			  ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each      |
+ * | alternative instr		     |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num)	"664"#num
+#define e_altinstr(num)	"665"#num
+
+#define e_oldinstr_pad_end	"663"
+#define oldinstr_len		"662b-661b"
+#define oldinstr_total_len	e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num)	e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+	"-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+	"((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len)					\
+	".if " len " > 254\n"						\
+	"\t.error \"cpu alternatives does not support instructions "	\
+		"blocks > 254 bytes\"\n"				\
+	".endif\n"							\
+	".if (" len ") %% 2\n"						\
+	"\t.error \"cpu alternatives instructions length is odd\"\n"	\
+	".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num)					\
+	".if " oldinstr_pad_len(num) " > 6\n"				\
+	"\tjg " e_oldinstr_pad_end "f\n"				\
+	"6620:\n"							\
+	"\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+	".else\n"							\
+	"\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"	\
+	"\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"	\
+	"\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"	\
+	".endif\n"
+
+#define OLDINSTR(oldinstr, num)						\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	OLDINSTR_PADDING(oldinstr, num)					\
+	e_oldinstr_pad_end ":\n"					\
+	INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2)				\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	".if " altinstr_len(num1) " < " altinstr_len(num2) "\n"		\
+	OLDINSTR_PADDING(oldinstr, num2)				\
+	".else\n"							\
+	OLDINSTR_PADDING(oldinstr, num1)				\
+	".endif\n"							\
+	e_oldinstr_pad_end ":\n"					\
+	INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num)					\
+	"\t.long 661b - .\n"			/* old instruction */	\
+	"\t.long " b_altinstr(num)"b - .\n"	/* alt instruction */	\
+	"\t.word " __stringify(facility) "\n"	/* facility bit    */	\
+	"\t.byte " oldinstr_total_len "\n"	/* source len	   */	\
+	"\t.byte " altinstr_len(num) "\n"	/* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num)	/* replacement */	\
+	b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"	\
+	INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(altinstr, 1)				\
+	".popsection\n"							\
+	OLDINSTR(oldinstr, 1)						\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(facility, 1)					\
+	".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(altinstr1, 1)				\
+	ALTINSTR_REPLACEMENT(altinstr2, 2)				\
+	".popsection\n"							\
+	OLDINSTR_2(oldinstr, 1, 2)					\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(facility1, 1)					\
+	ALTINSTR_ENTRY(facility2, 2)					\
+	".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility)			\
+	asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+	asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1,	    \
+				   altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
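
A usage sketch for the C-level macro defined above (the facility number 99 is made up purely for illustration; real callers pass an architected facility bit): if the running CPU reports the facility, apply_alternatives() patches the replacement instruction over the padded original at boot, otherwise the original instruction remains.

    /* Hypothetical example only: facility 99 is not a real facility bit. */
    static inline void example_alternative(void)
    {
        alternative("nop", "nopr %r7", 99);  /* harmless old/new instructions */
    }
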
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 5c8db3c..03b2e5b 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -48,6 +48,30 @@
 #define __smp_mb__before_atomic()	barrier()
 #define __smp_mb__after_atomic()	barrier()
 
+/**
+ * array_index_mask_nospec - generate a mask for array_idx() that is
+ * ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+						    unsigned long size)
+{
+	unsigned long mask;
+
+	if (__builtin_constant_p(size) && size > 0) {
+		asm("	clgr	%2,%1\n"
+		    "	slbgr	%0,%0\n"
+		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+		return mask;
+	}
+	asm("	clgr	%1,%2\n"
+	    "	slbgr	%0,%0\n"
+	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
+	return ~mask;
+}
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
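
The mask is meant to be combined with the index before the dependent memory access, in the style of array_index_nospec(): after a successful bounds check the mask is ~0UL and leaves the index unchanged, while a mispredicted out-of-bounds index is forced to 0, so speculation cannot read attacker-chosen memory. A portable user-space sketch of how the mask is consumed (the real implementations, like the clgr/slbgr sequence above, compute the mask without a conditional branch so it cannot itself be mispredicted):

    #include <stdio.h>

    /* Semantically what the mask is; the kernel computes it branchlessly. */
    static unsigned long index_mask(unsigned long index, unsigned long size)
    {
        return index < size ? ~0UL : 0UL;
    }

    static unsigned long load_element(const unsigned long *array,
                                      unsigned long nr, unsigned long index)
    {
        index &= index_mask(index, nr); /* out-of-bounds index becomes 0 */
        return array[index];
    }

    int main(void)
    {
        unsigned long a[4] = { 10, 20, 30, 40 };

        printf("%lu\n", load_element(a, 4, 2));  /* 30 */
        printf("%lu\n", load_element(a, 4, 99)); /* clamped, reads a[0] */
        return 0;
    }
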
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 09b406d..5811e78 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -15,7 +15,25 @@
 #include <linux/preempt.h>
 #include <asm/lowcore.h>
 
-#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
+#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+	unsigned char *ptr = (unsigned char *) facilities;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return;
+	ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+	unsigned char *ptr = (unsigned char *) facilities;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return;
+	ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
 
 static inline int __test_facility(unsigned long nr, void *facilities)
 {
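
The helpers above (like the existing __test_facility()) use MSB-first bit numbering within each byte of the facility list, matching the STFLE result format: facility bit nr lives in byte nr >> 3 under mask 0x80 >> (nr & 7). A small user-space sketch of the layout:

    #include <stdio.h>

    static void set_facility(unsigned long nr, unsigned char *fac)
    {
        fac[nr >> 3] |= 0x80 >> (nr & 7);
    }

    static int test_facility_bit(unsigned long nr, const unsigned char *fac)
    {
        return (fac[nr >> 3] & (0x80 >> (nr & 7))) != 0;
    }

    int main(void)
    {
        unsigned char fac[32] = { 0 };

        set_facility(82, fac);  /* byte 10, mask 0x80 >> 2 == 0x20 */
        printf("byte 10 = 0x%02x, bit 82 set = %d\n",
               fac[10], test_facility_bit(82, fac));
        return 0;
    }
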
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa..8f8eec9e 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
 		  "m" (*uaddr) : "cc");
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, newval, ret;
 
 	load_kernel_asce();
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
 
 	pagefault_disable();
 	switch (op) {
@@ -60,17 +55,9 @@
 	}
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf3..5792590 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -181,7 +181,8 @@
 	__u16	ipa;			/* 0x0056 */
 	__u32	ipb;			/* 0x0058 */
 	__u32	scaoh;			/* 0x005c */
-	__u8	reserved60;		/* 0x0060 */
+#define FPF_BPBC 	0x20
+	__u8	fpf;			/* 0x0060 */
 	__u8	ecb;			/* 0x0061 */
 	__u8    ecb2;                   /* 0x0062 */
 #define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78..ad4e0ce 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -135,7 +135,9 @@
 	/* Per cpu primary space access list */
 	__u32	paste[16];			/* 0x0400 */
 
-	__u8	pad_0x04c0[0x0e00-0x0440];	/* 0x0440 */
+	/* br %r1 trampoline */
+	__u16	br_r1_trampoline;		/* 0x0440 */
+	__u8	pad_0x0442[0x0e00-0x0442];	/* 0x0442 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
@@ -150,7 +152,8 @@
 	__u8	pad_0x0e20[0x0f00-0x0e20];	/* 0x0e20 */
 
 	/* Extended facility list */
-	__u64	stfle_fac_list[32];		/* 0x0f00 */
+	__u64	stfle_fac_list[16];		/* 0x0f00 */
+	__u64	alt_stfle_fac_list[16];		/* 0x0f80 */
 	__u8	pad_0x1000[0x11b0-0x1000];	/* 0x1000 */
 
 	/* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 0000000..b4bd8c4
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_disable;
+
+void nospec_init_branches(void);
+void nospec_auto_detect(void);
+void nospec_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644
index 0000000..9a56e73
--- /dev/null
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_NOSPEC_ASM_H
+#define _ASM_S390_NOSPEC_ASM_H
+
+#include <asm/alternative-asm.h>
+#include <asm/asm-offsets.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EXPOLINE
+
+_LC_BR_R1 = __LC_BR_R1
+
+/*
+ * The expoline macros are used to create thunks in the same format
+ * as gcc generates them. The 'comdat' section flag makes sure that
+ * the various thunks are merged into a single copy.
+ */
+	.macro __THUNK_PROLOG_NAME name
+	.pushsection .text.\name,"axG",@progbits,\name,comdat
+	.globl \name
+	.hidden \name
+	.type \name,@function
+\name:
+	.cfi_startproc
+	.endm
+
+	.macro __THUNK_EPILOG
+	.cfi_endproc
+	.popsection
+	.endm
+
+	.macro __THUNK_PROLOG_BR r1,r2
+	__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+	.endm
+
+	.macro __THUNK_PROLOG_BC d0,r1,r2
+	__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+	.endm
+
+	.macro __THUNK_BR r1,r2
+	jg	__s390x_indirect_jump_r\r2\()use_r\r1
+	.endm
+
+	.macro __THUNK_BC d0,r1,r2
+	jg	__s390x_indirect_branch_\d0\()_\r2\()use_\r1
+	.endm
+
+	.macro __THUNK_BRASL r1,r2,r3
+	brasl	\r1,__s390x_indirect_jump_r\r3\()use_r\r2
+	.endm
+
+	.macro	__DECODE_RR expand,reg,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \reg,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r2
+	\expand \r1,\r2
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_RR failed"
+	.endif
+	.endm
+
+	.macro	__DECODE_RRR expand,rsave,rtarget,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \rsave,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \rtarget,%r\r2
+	.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r3
+	\expand \r1,\r2,\r3
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_RRR failed"
+	.endif
+	.endm
+
+	.macro	__DECODE_DRR expand,disp,reg,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \reg,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r2
+	\expand \disp,\r1,\r2
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_DRR failed"
+	.endif
+	.endm
+
+	.macro __THUNK_EX_BR reg,ruse
+	# Be very careful when adding instructions to this macro!
+	# The ALTERNATIVE replacement code has a .+10 which targets
+	# the "br \reg" after the code has been patched.
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl	0,555f
+	j	.
+#else
+	.ifc \reg,%r1
+	ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
+	j	.
+	.else
+	larl	\ruse,555f
+	ex	0,0(\ruse)
+	j	.
+	.endif
+#endif
+555:	br	\reg
+	.endm
+
+	.macro __THUNK_EX_BC disp,reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl	0,556f
+	j	.
+#else
+	larl	\ruse,556f
+	ex	0,0(\ruse)
+	j	.
+#endif
+556:	b	\disp(\reg)
+	.endm
+
+	.macro GEN_BR_THUNK reg,ruse=%r1
+	__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+	__THUNK_EX_BR \reg,\ruse
+	__THUNK_EPILOG
+	.endm
+
+	.macro GEN_B_THUNK disp,reg,ruse=%r1
+	__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
+	__THUNK_EX_BC \disp,\reg,\ruse
+	__THUNK_EPILOG
+	.endm
+
+	.macro BR_EX reg,ruse=%r1
+557:	__DECODE_RR __THUNK_BR,\reg,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	557b-.
+	.popsection
+	.endm
+
+	 .macro B_EX disp,reg,ruse=%r1
+558:	__DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	558b-.
+	.popsection
+	.endm
+
+	.macro BASR_EX rsave,rtarget,ruse=%r1
+559:	__DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	559b-.
+	.popsection
+	.endm
+
+#else
+	.macro GEN_BR_THUNK reg,ruse=%r1
+	.endm
+
+	.macro GEN_B_THUNK disp,reg,ruse=%r1
+	.endm
+
+	 .macro BR_EX reg,ruse=%r1
+	br	\reg
+	.endm
+
+	 .macro B_EX disp,reg,ruse=%r1
+	b	\disp(\reg)
+	.endm
+
+	.macro BASR_EX rsave,rtarget,ruse=%r1
+	basr	\rsave,\rtarget
+	.endm
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6bcbbec..d584212 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,6 +84,7 @@
 extern const struct seq_operations cpuinfo_op;
 extern int sysctl_ieee_emulation_warnings;
 extern void execve_tail(void);
+extern void __bpon(void);
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -359,6 +360,9 @@
 	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
 }
 
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c039..84f2ae4 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -79,6 +79,8 @@
 #define TIF_SECCOMP		5	/* secure computing */
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
 #define TIF_UPROBE		7	/* breakpointed or single-stepping */
+#define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */
 #define TIF_31BIT		16	/* 32bit process */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal() */
@@ -94,6 +96,8 @@
 #define _TIF_SECCOMP		_BITUL(TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT	_BITUL(TIF_SYSCALL_TRACEPOINT)
 #define _TIF_UPROBE		_BITUL(TIF_UPROBE)
+#define _TIF_ISOLATE_BP		_BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST	_BITUL(TIF_ISOLATE_BP_GUEST)
 #define _TIF_31BIT		_BITUL(TIF_31BIT)
 #define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
 
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a2ffec4..81c02e1 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
+#define KVM_SYNC_BPBC	(1UL << 10)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -217,7 +218,9 @@
 	};
 	__u8  reserved[512];	/* for future vector expansion */
 	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-	__u8 padding[52];	/* riccb needs to be 64byte aligned */
+	__u8 bpbc : 1;		/* bp mode */
+	__u8 reserved2 : 7;
+	__u8 padding1[51];	/* riccb needs to be 64byte aligned */
 	__u8 riccb[64];		/* runtime instrumentation controls block */
 };
 
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98..5b13997 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -42,6 +42,7 @@
 CFLAGS_REMOVE_sclp.o	+= $(CC_FLAGS_MARCH)
 CFLAGS_sclp.o		+= -march=z900
 CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_EXPOLINE)
 CFLAGS_als.o		+= -march=z900
 AFLAGS_REMOVE_head.o	+= $(CC_FLAGS_MARCH)
 AFLAGS_head.o		+= -march=z900
@@ -57,10 +58,14 @@
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
 obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o
-obj-y	+= entry.o reipl.o relocate_kernel.o
+obj-y	+= entry.o reipl.o relocate_kernel.o alternative.o
+obj-y	+= nospec-branch.o
 
 extra-y				+= head.o head64.o vmlinux.lds
 
+obj-$(CONFIG_SYSFS)		+= nospec-sysfs.o
+CFLAGS_REMOVE_nospec-branch.o	+= $(CC_FLAGS_EXPOLINE)
+
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_TOPOLOGY)	+= topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 0000000..b57b293
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+	alt_instr_disabled = 1;
+	return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+	u16 opc;
+	s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+	0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+	&nop16,
+	&nop32,
+	&nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+	struct brcl_insn brcl = {
+		0xc0f4,
+		len / 2
+	};
+
+	memcpy(insns, &brcl, sizeof(brcl));
+	insns += sizeof(brcl);
+	len -= sizeof(brcl);
+
+	while (len > 0) {
+		memcpy(insns, &nop16, 2);
+		insns += 2;
+		len -= 2;
+	}
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+	if (len > 6)
+		add_jump_padding(insns, len);
+	else if (len >= 2)
+		memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+						  struct alt_instr *end)
+{
+	struct alt_instr *a;
+	u8 *instr, *replacement;
+	u8 insnbuf[MAX_PATCH_LEN];
+
+	/*
+	 * The scan order should be from start to end. Alternative code
+	 * scanned later can overwrite alternative code scanned earlier.
+	 */
+	for (a = start; a < end; a++) {
+		int insnbuf_sz = 0;
+
+		instr = (u8 *)&a->instr_offset + a->instr_offset;
+		replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+		if (!__test_facility(a->facility,
+				     S390_lowcore.alt_stfle_fac_list))
+			continue;
+
+		if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+			WARN_ONCE(1, "cpu alternatives instructions length is "
+				     "odd, skipping patching\n");
+			continue;
+		}
+
+		memcpy(insnbuf, replacement, a->replacementlen);
+		insnbuf_sz = a->replacementlen;
+
+		if (a->instrlen > a->replacementlen) {
+			add_padding(insnbuf + a->replacementlen,
+				    a->instrlen - a->replacementlen);
+			insnbuf_sz += a->instrlen - a->replacementlen;
+		}
+
+		s390_kernel_write(instr, insnbuf, insnbuf_sz);
+	}
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+					 struct alt_instr *end)
+{
+	if (!alt_instr_disabled)
+		__apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+	apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0..85c8ead 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -175,6 +175,7 @@
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
+	OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 326f717..61fca54 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -8,18 +8,22 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/sigp.h>
 
+	GEN_BR_THUNK %r9
+	GEN_BR_THUNK %r14
+
 ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
 	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_mcck_handler_fn
-	lg	%r1,0(%r1)
-	ltgr	%r1,%r1
+	lg	%r9,0(%r1)
+	ltgr	%r9,%r9
 	jz	1f
-	basr	%r14,%r1
+	BASR_EX	%r14,%r9
 1:	la	%r1,4095
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
 	lpswe	__LC_MCK_OLD_PSW
@@ -36,10 +40,10 @@
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_ext_handler_fn
-	lg	%r1,0(%r1)
-	ltgr	%r1,%r1
+	lg	%r9,0(%r1)
+	ltgr	%r9,%r9
 	jz	1f
-	basr	%r14,%r1
+	BASR_EX	%r14,%r9
 1:	lmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpswe	__LC_EXT_OLD_PSW
@@ -56,10 +60,10 @@
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_pgm_handler_fn
-	lg	%r1,0(%r1)
-	ltgr	%r1,%r1
+	lg	%r9,0(%r1)
+	ltgr	%r9,%r9
 	jz	1f
-	basr	%r14,%r1
+	BASR_EX	%r14,%r9
 	lmg	%r0,%r15,__LC_SAVE_AREA_SYNC
 	lpswe	__LC_PGM_OLD_PSW
 1:	lpswe	disabled_wait_psw-0b(%r13)
@@ -116,7 +120,7 @@
 	larl	%r4,.Lcontinue_psw	# Restore PSW flags
 	lpswe	0(%r4)
 .Lcontinue:
-	br	%r14
+	BR_EX	%r14
 .align 16
 .Lrestart_psw:
 	.long	0x00080000,0x80000000 + .Lrestart_part2
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6257898..0c7a7d5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -299,6 +299,11 @@
 {
 	stfle(S390_lowcore.stfle_fac_list,
 	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+	memcpy(S390_lowcore.alt_stfle_fac_list,
+	       S390_lowcore.stfle_fac_list,
+	       sizeof(S390_lowcore.alt_stfle_fac_list));
+	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
 }
 
 static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3bc2825..a4fd000 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -24,6 +24,7 @@
 #include <asm/setup.h>
 #include <asm/nmi.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 8
@@ -105,6 +106,7 @@
 	j	3f
 1:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,\timer
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
 3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
@@ -163,6 +165,72 @@
 		tm	off+\addr, \mask
 	.endm
 
+	.macro BPOFF
+	.pushsection .altinstr_replacement, "ax"
+660:	.long	0xb2e8c000
+	.popsection
+661:	.long	0x47000000
+	.pushsection .altinstructions, "a"
+	.long 661b - .
+	.long 660b - .
+	.word 82
+	.byte 4
+	.byte 4
+	.popsection
+	.endm
+
+	.macro BPON
+	.pushsection .altinstr_replacement, "ax"
+662:	.long	0xb2e8d000
+	.popsection
+663:	.long	0x47000000
+	.pushsection .altinstructions, "a"
+	.long 663b - .
+	.long 662b - .
+	.word 82
+	.byte 4
+	.byte 4
+	.popsection
+	.endm
+
+	.macro BPENTER tif_ptr,tif_mask
+	.pushsection .altinstr_replacement, "ax"
+662:	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
+	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
+	.popsection
+664:	TSTMSK	\tif_ptr,\tif_mask
+	jz	. + 8
+	.long	0xb2e8d000
+	.pushsection .altinstructions, "a"
+	.long 664b - .
+	.long 662b - .
+	.word 82
+	.byte 12
+	.byte 12
+	.popsection
+	.endm
+
+	.macro BPEXIT tif_ptr,tif_mask
+	TSTMSK	\tif_ptr,\tif_mask
+	.pushsection .altinstr_replacement, "ax"
+662:	jnz	. + 8
+	.long	0xb2e8d000
+	.popsection
+664:	jz	. + 8
+	.long	0xb2e8c000
+	.pushsection .altinstructions, "a"
+	.long 664b - .
+	.long 662b - .
+	.word 82
+	.byte 8
+	.byte 8
+	.popsection
+	.endm
+
+	GEN_BR_THUNK %r9
+	GEN_BR_THUNK %r14
+	GEN_BR_THUNK %r14,%r11
+
 	.section .kprobes.text, "ax"
 .Ldummy:
 	/*
@@ -175,6 +243,11 @@
 	 */
 	nop	0
 
+ENTRY(__bpon)
+	.globl __bpon
+	BPON
+	BR_EX	%r14
+
 /*
  * Scheduler resume function, called by switch_to
  *  gpr2 = (task_struct *) prev
@@ -201,9 +274,9 @@
 	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
-	bzr	%r14
+	jz	0f
 	.insn	s,0xb2800000,__LC_LPP		# set program parameter
-	br	%r14
+0:	BR_EX	%r14
 
 .L__critical_start:
 
@@ -215,9 +288,11 @@
  */
 ENTRY(sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
+	lg	%r12,__LC_CURRENT
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
 	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+	mvc	__SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
 	jno	.Lsie_load_guest_gprs
 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
@@ -234,7 +309,11 @@
 	jnz	.Lsie_skip
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsie_skip			# exit if fp/vx regs changed
+	BPEXIT	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 	sie	0(%r14)
+.Lsie_exit:
+	BPOFF
+	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_skip:
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
@@ -255,9 +334,15 @@
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
+	xgr	%r0,%r0				# clear guest registers to
+	xgr	%r1,%r1				# prevent speculative use
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
-	br	%r14
+	BR_EX	%r14
 .Lsie_fault:
 	lghi	%r14,-EFAULT
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
@@ -280,6 +365,7 @@
 	stpt	__LC_SYNC_ENTER_TIMER
 .Lsysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+	BPOFF
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
 	lghi	%r14,_PIF_SYSCALL
@@ -289,12 +375,15 @@
 	LAST_BREAK %r13
 .Lsysc_vtime:
 	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
+	# clear user controlled register to prevent speculative use
+	xgr	%r0,%r0
 	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	llgh	%r8,__PT_INT_CODE+2(%r11)
 	slag	%r8,%r8,2			# shift and test for svc 0
@@ -312,7 +401,7 @@
 	lgf	%r9,0(%r8,%r10)			# get system call add.
 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 	jnz	.Lsysc_tracesys
-	basr	%r14,%r9			# call sys_xxxx
+	BASR_EX	%r14,%r9			# call sys_xxxx
 	stg	%r2,__PT_R2(%r11)		# store return value
 
 .Lsysc_return:
@@ -324,6 +413,7 @@
 	jnz	.Lsysc_work			# check for work
 	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 	jnz	.Lsysc_work
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 .Lsysc_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
@@ -451,7 +541,7 @@
 	lmg	%r3,%r7,__PT_R3(%r11)
 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 	lg	%r2,__PT_ORIG_GPR2(%r11)
-	basr	%r14,%r9		# call sys_xxx
+	BASR_EX	%r14,%r9		# call sys_xxx
 	stg	%r2,__PT_R2(%r11)	# store return value
 .Lsysc_tracenogo:
 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
@@ -475,7 +565,7 @@
 	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
 ENTRY(kernel_thread_starter)
 	la	%r2,0(%r10)
-	basr	%r14,%r9
+	BASR_EX	%r14,%r9
 	j	.Lsysc_tracenogo
 
 /*
@@ -484,6 +574,7 @@
 
 ENTRY(pgm_check_handler)
 	stpt	__LC_SYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
@@ -508,6 +599,7 @@
 	j	3f
 2:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
@@ -517,6 +609,15 @@
 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
 3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -538,9 +639,9 @@
 	nill	%r10,0x007f
 	sll	%r10,2
 	je	.Lpgm_return
-	lgf	%r1,0(%r10,%r1)		# load address of handler routine
+	lgf	%r9,0(%r10,%r1)		# load address of handler routine
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	basr	%r14,%r1		# branch to interrupt-handler
+	BASR_EX	%r14,%r9		# branch to interrupt-handler
 .Lpgm_return:
 	LOCKDEP_SYS_EXIT
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
@@ -573,6 +674,7 @@
 ENTRY(io_int_handler)
 	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
@@ -580,6 +682,16 @@
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -614,9 +726,13 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	.Lio_exit_kernel
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 .Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
 	lmg	%r11,%r15,__PT_R11(%r11)
 	lpswe	__LC_RETURN_PSW
 .Lio_done:
@@ -748,6 +864,7 @@
 ENTRY(ext_int_handler)
 	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
@@ -755,6 +872,16 @@
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	lghi	%r1,__LC_EXT_PARAMS2
@@ -787,11 +914,12 @@
 .Lpsw_idle_stcctm:
 #endif
 	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
+	BPON
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
 	lpswe	__SF_EMPTY(%r15)
-	br	%r14
+	BR_EX	%r14
 .Lpsw_idle_end:
 
 /*
@@ -805,7 +933,7 @@
 	lg	%r2,__LC_CURRENT
 	aghi	%r2,__TASK_thread
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bor	%r14
+	jo	.Lsave_fpu_regs_exit
 	stfpc	__THREAD_FPU_fpc(%r2)
 .Lsave_fpu_regs_fpc_end:
 	lg	%r3,__THREAD_FPU_regs(%r2)
@@ -835,7 +963,8 @@
 	std	15,120(%r3)
 .Lsave_fpu_regs_done:
 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
-	br	%r14
+.Lsave_fpu_regs_exit:
+	BR_EX	%r14
 .Lsave_fpu_regs_end:
 #if IS_ENABLED(CONFIG_KVM)
 EXPORT_SYMBOL(save_fpu_regs)
@@ -855,7 +984,7 @@
 	lg	%r4,__LC_CURRENT
 	aghi	%r4,__TASK_thread
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bnor	%r14
+	jno	.Lload_fpu_regs_exit
 	lfpc	__THREAD_FPU_fpc(%r4)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
@@ -884,7 +1013,8 @@
 	ld	15,120(%r4)
 .Lload_fpu_regs_done:
 	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
-	br	%r14
+.Lload_fpu_regs_exit:
+	BR_EX	%r14
 .Lload_fpu_regs_end:
 
 .L__critical_end:
@@ -894,6 +1024,7 @@
  */
 ENTRY(mcck_int_handler)
 	STCK	__LC_MCCK_CLOCK
+	BPOFF
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -925,6 +1056,16 @@
 .Lmcck_skip:
 	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),0(%r14)
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -950,6 +1091,7 @@
 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 0:	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1045,7 +1187,7 @@
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
 	jl	.Lcleanup_load_fpu_regs
-0:	br	%r14
+0:	BR_EX	%r14
 
 	.align	8
 .Lcleanup_table:
@@ -1070,11 +1212,12 @@
 	.quad	.Lsie_done
 
 .Lcleanup_sie:
+	BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	larl	%r9,sie_exit			# skip forward to sie_exit
-	br	%r14
+	BR_EX	%r14
 #endif
 
 .Lcleanup_system_call:
@@ -1116,7 +1259,8 @@
 	srag	%r9,%r9,23
 	jz	0f
 	mvc	__TI_last_break(8,%r12),16(%r11)
-0:	# set up saved register r11
+0:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
 	stg	%r9,24(%r11)		# r11 pt_regs pointer
@@ -1131,7 +1275,7 @@
 	stg	%r15,56(%r11)		# r15 stack pointer
 	# set new psw address and exit
 	larl	%r9,.Lsysc_do_svc
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_system_call_insn:
 	.quad	system_call
 	.quad	.Lsysc_stmg
@@ -1141,7 +1285,7 @@
 
 .Lcleanup_sysc_tif:
 	larl	%r9,.Lsysc_tif
-	br	%r14
+	BR_EX	%r14,%r11
 
 .Lcleanup_sysc_restore:
 	# check if stpt has been executed
@@ -1158,14 +1302,14 @@
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
 1:	lmg	%r8,%r9,__LC_RETURN_PSW
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_sysc_restore_insn:
 	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
 .Lcleanup_io_tif:
 	larl	%r9,.Lio_tif
-	br	%r14
+	BR_EX	%r14,%r11
 
 .Lcleanup_io_restore:
 	# check if stpt has been executed
@@ -1179,7 +1323,7 @@
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
 1:	lmg	%r8,%r9,__LC_RETURN_PSW
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_io_restore_insn:
 	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
@@ -1232,17 +1376,17 @@
 	# prepare return psw
 	nihh	%r8,0xfcfd		# clear irq & wait state bits
 	lg	%r9,48(%r11)		# return from psw_idle
-	br	%r14
+	BR_EX	%r14,%r11
 .Lcleanup_idle_insn:
 	.quad	.Lpsw_idle_lpsw
 
 .Lcleanup_save_fpu_regs:
 	larl	%r9,save_fpu_regs
-	br	%r14
+	BR_EX	%r14,%r11
 
 .Lcleanup_load_fpu_regs:
 	larl	%r9,load_fpu_regs
-	br	%r14
+	BR_EX	%r14,%r11
 
 /*
  * Integer constants
@@ -1258,7 +1402,6 @@
 .Lsie_critical_length:
 	.quad	.Lsie_done - .Lsie_gmap
 #endif
-
 	.section .rodata, "a"
 #define SYSCALL(esame,emu)	.long esame
 	.globl	sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39127b6..df49f2a1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@
 
 static void __ipl_run(void *unused)
 {
+	__bpon();
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 	if (MACHINE_IS_VM)
 		__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d656..7ff9767 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -173,10 +173,9 @@
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
 		asm volatile("   la    15,0(%0)\n"
-			     "   basr  14,%2\n"
+			     "   brasl 14,__do_softirq\n"
 			     "   la    15,0(%1)\n"
-			     : : "a" (new), "a" (old),
-			         "a" (__do_softirq)
+			     : : "a" (new), "a" (old)
 			     : "0", "1", "2", "3", "4", "5", "14",
 			       "cc", "memory" );
 	} else {
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 9a17e44..be75e8e 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,13 +8,17 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
 
+	GEN_BR_THUNK %r1
+	GEN_BR_THUNK %r14
+
 	.section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
-	br	%r14
+	BR_EX	%r14
 
 #define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
 #define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
@@ -22,7 +26,7 @@
 #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
 
 ENTRY(_mcount)
-	br	%r14
+	BR_EX	%r14
 
 EXPORT_SYMBOL(_mcount)
 
@@ -52,7 +56,7 @@
 #endif
 	lgr	%r3,%r14
 	la	%r5,STACK_PTREGS(%r15)
-	basr	%r14,%r1
+	BASR_EX	%r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller.
@@ -67,7 +71,7 @@
 #endif
 	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
 	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
-	br	%r1
+	BR_EX	%r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -80,6 +84,6 @@
 	aghi	%r15,STACK_FRAME_OVERHEAD
 	lgr	%r14,%r2
 	lmg	%r2,%r5,32(%r15)
-	br	%r14
+	BR_EX	%r14
 
 #endif
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index fbc0789..64ccfdf 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
 #include <linux/kernel.h>
 #include <linux/moduleloader.h>
 #include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
 
 #if 0
 #define DEBUGP printk
@@ -167,7 +170,11 @@
 	me->arch.got_offset = me->core_layout.size;
 	me->core_layout.size += me->arch.got_size;
 	me->arch.plt_offset = me->core_layout.size;
-	me->core_layout.size += me->arch.plt_size;
+	if (me->arch.plt_size) {
+		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+			me->arch.plt_size += PLT_ENTRY_SIZE;
+		me->core_layout.size += me->arch.plt_size;
+	}
 	return 0;
 }
 
@@ -321,9 +328,20 @@
 			unsigned int *ip;
 			ip = me->core_layout.base + me->arch.plt_offset +
 				info->plt_offset;
-			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
-			ip[1] = 0x100a0004;
-			ip[2] = 0x07f10000;
+			ip[0] = 0x0d10e310;	/* basr 1,0  */
+			ip[1] = 0x100a0004;	/* lg	1,10(1) */
+			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+				unsigned int *ij;
+				ij = me->core_layout.base +
+					me->arch.plt_offset +
+					me->arch.plt_size - PLT_ENTRY_SIZE;
+				ip[2] = 0xa7f40000 +	/* j __jump_r1 */
+					(unsigned int)(u16)
+					(((unsigned long) ij - 8 -
+					  (unsigned long) ip) / 2);
+			} else {
+				ip[2] = 0x07f10000;	/* br %r1 */
+			}
 			ip[3] = (unsigned int) (val >> 32);
 			ip[4] = (unsigned int) val;
 			info->plt_initialized = 1;
@@ -428,6 +446,45 @@
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	const Elf_Shdr *s;
+	char *secstrings, *secname;
+	void *aseg;
+
+	if (IS_ENABLED(CONFIG_EXPOLINE) &&
+	    !nospec_disable && me->arch.plt_size) {
+		unsigned int *ij;
+
+		ij = me->core_layout.base + me->arch.plt_offset +
+			me->arch.plt_size - PLT_ENTRY_SIZE;
+		if (test_facility(35)) {
+			ij[0] = 0xc6000000;	/* exrl	%r0,.+10	*/
+			ij[1] = 0x0005a7f4;	/* j	.		*/
+			ij[2] = 0x000007f1;	/* br	%r1		*/
+		} else {
+			ij[0] = 0x44000000 | (unsigned int)
+				offsetof(struct lowcore, br_r1_trampoline);
+			ij[1] = 0xa7f40000;	/* j	.		*/
+		}
+	}
+
+	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+		aseg = (void *) s->sh_addr;
+		secname = secstrings + s->sh_name;
+
+		if (!strcmp(".altinstructions", secname))
+			/* patch .altinstructions */
+			apply_alternatives(aseg, aseg + s->sh_size);
+
+		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+		    (!strncmp(".s390_indirect", secname, 14)))
+			nospec_revert(aseg, aseg + s->sh_size);
+
+		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+		    (!strncmp(".s390_return", secname, 12)))
+			nospec_revert(aseg, aseg + s->sh_size);
+	}
+
 	jump_label_apply_nops(me);
 	return 0;
 }
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 0000000..d5eed65
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+static int __init nobp_setup_early(char *str)
+{
+	bool enabled;
+	int rc;
+
+	rc = kstrtobool(str, &enabled);
+	if (rc)
+		return rc;
+	if (enabled && test_facility(82)) {
+		/*
+		 * The user explicitly requested nobp=1, enable it and
+		 * disable the expoline support.
+		 */
+		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
+		if (IS_ENABLED(CONFIG_EXPOLINE))
+			nospec_disable = 1;
+	} else {
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	}
+	return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	return 0;
+}
+early_param("nospec", nospec_setup_early);
+
+static int __init nospec_report(void)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+		pr_info("Spectre V2 mitigation: execute trampolines.\n");
+	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+		pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+	return 0;
+}
+arch_initcall(nospec_report);
+
+#ifdef CONFIG_EXPOLINE
+
+int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+	nospec_disable = 1;
+	return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+void __init nospec_auto_detect(void)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE)) {
+		/*
+		 * The kernel has been compiled with expolines.
+		 * Keep expolines enabled and disable nobp.
+		 */
+		nospec_disable = 0;
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	}
+	/*
+	 * If the kernel has not been compiled with expolines, the
+	 * nobp setting decides what is done; this depends on the
+	 * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+	 */
+}
+
+static int __init spectre_v2_setup_early(char *str)
+{
+	if (str && !strncmp(str, "on", 2)) {
+		nospec_disable = 0;
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	}
+	if (str && !strncmp(str, "off", 3))
+		nospec_disable = 1;
+	if (str && !strncmp(str, "auto", 4))
+		nospec_auto_detect();
+	return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+	enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+	u8 *instr, *thunk, *br;
+	u8 insnbuf[6];
+	s32 *epo;
+
+	/* The second part of the instruction replacement is always a nop */
+	for (epo = start; epo < end; epo++) {
+		instr = (u8 *) epo + *epo;
+		if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+			type = BRCL_EXPOLINE;	/* brcl instruction */
+		else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+			type = BRASL_EXPOLINE;	/* brasl instruction */
+		else
+			continue;
+		thunk = instr + (*(int *)(instr + 2)) * 2;
+		if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+			/* exrl %r0,<target-br> */
+			br = thunk + (*(int *)(thunk + 2)) * 2;
+		else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+			 thunk[6] == 0x44 && thunk[7] == 0x00 &&
+			 (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+			 (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+			/* larl %rx,<target br> + ex %r0,0(%rx) */
+			br = thunk + (*(int *)(thunk + 2)) * 2;
+		else
+			continue;
+		/* Check for unconditional branch 0x07f? or 0x47f???? */
+		if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+			continue;
+
+		memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+		switch (type) {
+		case BRCL_EXPOLINE:
+			insnbuf[0] = br[0];
+			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			if (br[0] == 0x47) {
+				/* brcl to b, replace with bc + nopr */
+				insnbuf[2] = br[2];
+				insnbuf[3] = br[3];
+			} else {
+				/* brcl to br, replace with bcr + nop */
+			}
+			break;
+		case BRASL_EXPOLINE:
+			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			if (br[0] == 0x47) {
+				/* brasl to b, replace with bas + nopr */
+				insnbuf[0] = 0x4d;
+				insnbuf[2] = br[2];
+				insnbuf[3] = br[3];
+			} else {
+				/* brasl to br, replace with basr + nop */
+				insnbuf[0] = 0x0d;
+			}
+			break;
+		}
+
+		s390_kernel_write(instr, insnbuf, 6);
+	}
+}
+
+void __init_or_module nospec_revert(s32 *start, s32 *end)
+{
+	if (nospec_disable)
+		__nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+	nospec_revert(__nospec_call_start, __nospec_call_end);
+	nospec_revert(__nospec_return_start, __nospec_return_end);
+}
+
+#endif /* CONFIG_EXPOLINE */
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
new file mode 100644
index 0000000..8affad5
--- /dev/null
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+		return sprintf(buf, "Mitigation: execute trampolines\n");
+	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+		return sprintf(buf, "Mitigation: limited branch prediction\n");
+	return sprintf(buf, "Vulnerable\n");
+}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c..96e4fca 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -739,6 +739,10 @@
 	 */
 	rate = 0;
 	if (attr->freq) {
+		if (!attr->sample_freq) {
+			err = -EINVAL;
+			goto out;
+		}
 		rate = freq_to_sample_rate(&si, attr->sample_freq);
 		rate = hw_limit_rate(&si, rate);
 		attr->freq = 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808..d856263 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -179,3 +179,21 @@
 	.stop	= c_stop,
 	.show	= show_cpuinfo,
 };
+
+int s390_isolate_bp(void)
+{
+	if (!test_facility(82))
+		return -EOPNOTSUPP;
+	set_thread_flag(TIF_ISOLATE_BP);
+	return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+	if (!test_facility(82))
+		return -EOPNOTSUPP;
+	set_thread_flag(TIF_ISOLATE_BP_GUEST);
+	return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 89ea8c2..70d635d 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,8 +6,11 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
+	GEN_BR_THUNK %r9
+
 #
 # Issue "store status" for the current CPU to its prefix page
 # and call passed function afterwards
@@ -66,9 +69,9 @@
 	st	%r4,0(%r1)
 	st	%r5,4(%r1)
 	stg	%r2,8(%r1)
-	lgr	%r1,%r2
+	lgr	%r9,%r2
 	lgr	%r2,%r3
-	br	%r1
+	BR_EX	%r9
 
 	.section .bss
 	.align	8
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e974e53..feb9d97 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
 #include <asm/sclp.h>
 #include <asm/sysinfo.h>
 #include <asm/numa.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
 #include "entry.h"
 
 /*
@@ -335,7 +337,9 @@
 	lc->machine_flags = S390_lowcore.machine_flags;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       MAX_FACILITY_BIT/8);
+	       sizeof(lc->stfle_fac_list));
+	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+	       sizeof(lc->alt_stfle_fac_list));
 	if (MACHINE_HAS_VX)
 		lc->vector_save_area_addr =
 			(unsigned long) &lc->vector_save_area;
@@ -372,6 +376,7 @@
 #ifdef CONFIG_SMP
 	lc->spinlock_lockval = arch_spin_lockval(0);
 #endif
+	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
@@ -871,6 +876,9 @@
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
+	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+		nospec_auto_detect();
+
 	parse_early_param();
 #ifdef CONFIG_CRASH_DUMP
 	/* Deactivate elfcorehdr= kernel parameter */
@@ -931,6 +939,10 @@
 	conmode_default();
 	set_preferred_console();
 
+	apply_alternative_instructions();
+	if (IS_ENABLED(CONFIG_EXPOLINE))
+		nospec_init_branches();
+
 	/* Setup zfcpdump support */
 	setup_zfcpdump();
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe..0a31110 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -205,6 +205,7 @@
 	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
+	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	if (MACHINE_HAS_VX)
 		lc->vector_save_area_addr =
 			(unsigned long) &lc->vector_save_area;
@@ -253,7 +254,9 @@
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       MAX_FACILITY_BIT/8);
+	       sizeof(lc->stfle_fac_list));
+	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+	       sizeof(lc->alt_stfle_fac_list));
 }
 
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -302,6 +305,7 @@
 	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
 	mem_assign_absolute(lc->restart_data, (unsigned long) data);
 	mem_assign_absolute(lc->restart_source, source_cpu);
+	__bpon();
 	asm volatile(
 		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
 		"	brc	2,0b	# busy, try again\n"
@@ -875,6 +879,7 @@
 void __noreturn cpu_die(void)
 {
 	idle_task_exit();
+	__bpon();
 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 	for (;;) ;
 }
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e8..4e76aaf 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -12,6 +12,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
 /*
@@ -23,6 +24,8 @@
  * (see below) in the resume process.
  * This function runs with disabled interrupts.
  */
+	GEN_BR_THUNK %r14
+
 	.section .text
 ENTRY(swsusp_arch_suspend)
 	stmg	%r6,%r15,__SF_GPRS(%r15)
@@ -102,7 +105,7 @@
 	spx	0x318(%r1)
 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi	%r2,0
-	br	%r14
+	BR_EX	%r14
 
 /*
  * Restore saved memory image to correct place and restore register context.
@@ -200,7 +203,7 @@
 	lghi	%r1,0
 	sam31
 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
-	basr	%r14,%r3
+	brasl	%r14,_sclp_print_early
 	larl	%r3,.Ldisabled_wait_31
 	lpsw	0(%r3)
 4:
@@ -266,7 +269,7 @@
 	/* Return 0 */
 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi	%r2,0
-	br	%r14
+	BR_EX	%r14
 
 	.section .data..nosave,"aw",@progbits
 	.align	8
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c0..3d04dfd 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@
 	return orig;
 }
 
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+			     struct pt_regs *regs)
+{
+	if (ctx == RP_CHECK_CHAIN_CALL)
+		return user_stack_pointer(regs) <= ret->stack;
+	else
+		return user_stack_pointer(regs) < ret->stack;
+}
+
 /* Instruction Emulation */
 
 static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 115bda2..dd96b46 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -99,6 +99,43 @@
 		EXIT_DATA
 	}
 
+	/*
+	 * struct alt_instr entries. From the header (alternative.h):
+	 * "Alternative instructions for different CPU types or capabilities"
+	 * Think locking instructions on spinlocks.
+	 * Note that it is part of the __init region.
+	 */
+	. = ALIGN(8);
+	.altinstructions : {
+		__alt_instructions = .;
+		*(.altinstructions)
+		__alt_instructions_end = .;
+	}
+
+	/*
+	 * And here are the replacement instructions. The linker sticks
+	 * them in as binary blobs. The .altinstructions section has enough
+	 * data to get their address and length to patch the kernel safely.
+	 * Note that it is part of the __init region.
+	 */
+	.altinstr_replacement : {
+		*(.altinstr_replacement)
+	}
+
+	/*
+	 * Table with the patch locations to undo expolines
+	 */
+	.nospec_call_table : {
+		__nospec_call_start = . ;
+		*(.s390_indirect*)
+		__nospec_call_end = . ;
+	}
+	.nospec_return_table : {
+		__nospec_return_start = . ;
+		*(.s390_return*)
+		__nospec_return_end = . ;
+	}
+
 	/* early.c uses stsi, which requires page aligned data. */
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a70ff09..2032ab8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -401,6 +401,9 @@
 	case KVM_CAP_S390_RI:
 		r = test_facility(64);
 		break;
+	case KVM_CAP_S390_BPB:
+		r = test_facility(82);
+		break;
 	default:
 		r = 0;
 	}
@@ -1713,6 +1716,8 @@
 	kvm_s390_set_prefix(vcpu, 0);
 	if (test_kvm_facility(vcpu->kvm, 64))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+	if (test_kvm_facility(vcpu->kvm, 82))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
 	 */
@@ -1829,7 +1834,6 @@
 	if (test_fp_ctl(current->thread.fpu.fpc))
 		/* User space provided an invalid FPC, let's clear it */
 		current->thread.fpu.fpc = 0;
-
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.enabled_gmap);
@@ -1877,6 +1881,7 @@
 	current->thread.fpu.fpc = 0;
 	vcpu->arch.sie_block->gbea = 1;
 	vcpu->arch.sie_block->pp = 0;
+	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2744,6 +2749,11 @@
 		if (riccb->valid)
 			vcpu->arch.sie_block->ecb3 |= 0x01;
 	}
+	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+	    test_kvm_facility(vcpu->kvm, 82)) {
+		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+	}
 
 	kvm_run->kvm_dirty_regs = 0;
 }
@@ -2762,6 +2772,7 @@
 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d8673e2..51f842c 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -217,6 +217,12 @@
 	memcpy(scb_o->gcr, scb_s->gcr, 128);
 	scb_o->pp = scb_s->pp;
 
+	/* branch prediction */
+	if (test_kvm_facility(vcpu->kvm, 82)) {
+		scb_o->fpf &= ~FPF_BPBC;
+		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+	}
+
 	/* interrupt intercept */
 	switch (scb_s->icptcode) {
 	case ICPT_PROGI:
@@ -259,6 +265,7 @@
 	scb_s->ecb3 = 0;
 	scb_s->ecd = 0;
 	scb_s->fac = 0;
+	scb_s->fpf = 0;
 
 	rc = prepare_cpuflags(vcpu, vsie_page);
 	if (rc)
@@ -316,6 +323,9 @@
 			prefix_unmapped(vsie_page);
 		scb_s->ecb |= scb_o->ecb & 0x10U;
 	}
+	/* branch prediction */
+	if (test_kvm_facility(vcpu->kvm, 82))
+		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 	/* SIMD */
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		scb_s->eca |= scb_o->eca & 0x00020000U;
@@ -539,7 +549,7 @@
 
 	gpa = scb_o->itdba & ~0xffUL;
 	if (gpa && (scb_s->ecb & 0x10U)) {
-		if (!(gpa & ~0x1fffU)) {
+		if (!(gpa & ~0x1fffUL)) {
 			rc = set_validity_icpt(scb_s, 0x0080U);
 			goto unpin;
 		}
@@ -754,6 +764,7 @@
 {
 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+	int guest_bp_isolation;
 	int rc;
 
 	handle_last_fault(vcpu, vsie_page);
@@ -764,6 +775,20 @@
 		s390_handle_mcck();
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+	/* save current guest state of bp isolation override */
+	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+	/*
+	 * The guest is running with BPBC, so we have to force it on for our
+	 * nested guest. This is done by enabling BPBC globally, so the BPBC
+	 * control in the SCB (which the nested guest can modify) is simply
+	 * ignored.
+	 */
+	if (test_kvm_facility(vcpu->kvm, 82) &&
+	    vcpu->arch.sie_block->fpf & FPF_BPBC)
+		set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
 	local_irq_disable();
 	guest_enter_irqoff();
 	local_irq_enable();
@@ -773,6 +798,11 @@
 	local_irq_disable();
 	guest_exit_irqoff();
 	local_irq_enable();
+
+	/* restore guest state for bp isolation override */
+	if (!guest_bp_isolation)
+		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	if (rc > 0)
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65..e7672ed 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -6,6 +6,9 @@
 
 #include <linux/linkage.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
+
+	GEN_BR_THUNK %r14
 
 /*
  * memset implementation
@@ -39,7 +42,7 @@
 .Lmemset_clear_rest:
 	larl	%r3,.Lmemset_xc
 	ex	%r4,0(%r3)
-	br	%r14
+	BR_EX	%r14
 .Lmemset_fill:
 	stc	%r3,0(%r2)
 	cghi	%r4,1
@@ -56,7 +59,7 @@
 .Lmemset_fill_rest:
 	larl	%r3,.Lmemset_mvc
 	ex	%r4,0(%r3)
-	br	%r14
+	BR_EX	%r14
 .Lmemset_xc:
 	xc	0(1,%r1),0(%r1)
 .Lmemset_mvc:
@@ -79,7 +82,7 @@
 .Lmemcpy_rest:
 	larl	%r5,.Lmemcpy_mvc
 	ex	%r4,0(%r5)
-	br	%r14
+	BR_EX	%r14
 .Lmemcpy_loop:
 	mvc	0(256,%r1),0(%r3)
 	la	%r1,256(%r1)
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index a1c917d..fa716f2 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include "bpf_jit.h"
 
 /*
@@ -53,7 +54,7 @@
 	clg	%r3,STK_OFF_HLEN(%r15);	/* Offset + SIZE > hlen? */	\
 	jh	sk_load_##NAME##_slow;					\
 	LOAD	%r14,-SIZE(%r3,%r12);	/* Get data from skb */		\
-	b	OFF_OK(%r6);		/* Return */			\
+	B_EX	OFF_OK,%r6;		/* Return */			\
 									\
 sk_load_##NAME##_slow:;							\
 	lgr	%r2,%r7;		/* Arg1 = skb pointer */	\
@@ -63,11 +64,14 @@
 	brasl	%r14,skb_copy_bits;	/* Get data from skb */		\
 	LOAD	%r14,STK_OFF_TMP(%r15);	/* Load from temp bufffer */	\
 	ltgr	%r2,%r2;		/* Set cc to (%r2 != 0) */	\
-	br	%r6;			/* Return */
+	BR_EX	%r6;			/* Return */
 
 sk_load_common(word, 4, llgf)	/* r14 = *(u32 *) (skb->data+offset) */
 sk_load_common(half, 2, llgh)	/* r14 = *(u16 *) (skb->data+offset) */
 
+	GEN_BR_THUNK %r6
+	GEN_B_THUNK OFF_OK,%r6
+
 /*
  * Load 1 byte from SKB (optimized version)
  */
@@ -79,7 +83,7 @@
 	clg	%r3,STK_OFF_HLEN(%r15)	# Offset >= hlen?
 	jnl	sk_load_byte_slow
 	llgc	%r14,0(%r3,%r12)	# Get byte from skb
-	b	OFF_OK(%r6)		# Return OK
+	B_EX	OFF_OK,%r6		# Return OK
 
 sk_load_byte_slow:
 	lgr	%r2,%r7			# Arg1 = skb pointer
@@ -89,7 +93,7 @@
 	brasl	%r14,skb_copy_bits	# Get data from skb
 	llgc	%r14,STK_OFF_TMP(%r15)	# Load result from temp buffer
 	ltgr	%r2,%r2			# Set cc to (%r2 != 0)
-	br	%r6			# Return cc
+	BR_EX	%r6			# Return cc
 
 #define sk_negative_common(NAME, SIZE, LOAD)				\
 sk_load_##NAME##_slow_neg:;						\
@@ -103,7 +107,7 @@
 	jz	bpf_error;						\
 	LOAD	%r14,0(%r2);		/* Get data from pointer */	\
 	xr	%r3,%r3;		/* Set cc to zero */		\
-	br	%r6;			/* Return cc */
+	BR_EX	%r6;			/* Return cc */
 
 sk_negative_common(word, 4, llgf)
 sk_negative_common(half, 2, llgh)
@@ -112,4 +116,4 @@
 bpf_error:
 # force a return 0 from jit handler
 	ltgr	%r15,%r15	# Set condition code
-	br	%r6
+	BR_EX	%r6
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e8dee62..e7ce257 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -24,6 +24,8 @@
 #include <linux/bpf.h>
 #include <asm/cacheflush.h>
 #include <asm/dis.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
 #include "bpf_jit.h"
 
 int bpf_jit_enable __read_mostly;
@@ -41,6 +43,8 @@
 	int base_ip;		/* Base address for literal pool */
 	int ret0_ip;		/* Address of return 0 */
 	int exit_ip;		/* Address of exit */
+	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
+	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
 	int tail_call_start;	/* Tail call start offset */
 	int labels[1];		/* Labels for local jumps */
 };
@@ -251,6 +255,19 @@
 	REG_SET_SEEN(b2);					\
 })
 
+#define EMIT6_PCREL_RILB(op, b, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);	\
+	REG_SET_SEEN(b);					\
+})
+
+#define EMIT6_PCREL_RIL(op, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | rel >> 16, rel & 0xffff);			\
+})
+
 #define _EMIT6_IMM(op, imm)					\
 ({								\
 	unsigned int __imm = (imm);				\
@@ -470,8 +487,45 @@
 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 	/* Restore registers */
 	save_restore_regs(jit, REGS_RESTORE);
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+		jit->r14_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r14 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,0(%r1) */
+			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
+		}
+		/* j . */
+		EMIT4_PCREL(0xa7f40000, 0);
+	}
 	/* br %r14 */
 	_EMIT2(0x07fe);
+
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+	    (jit->seen & SEEN_FUNC)) {
+		jit->r1_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r1 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+			/* br %r1 */
+			_EMIT2(0x07f1);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,S390_lowcore.br_r1_trampoline */
+			EMIT4_DISP(0x44000000, REG_0, REG_0,
+				   offsetof(struct lowcore, br_r1_trampoline));
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+		}
+	}
 }
 
 /*
@@ -977,8 +1031,13 @@
 		/* lg %w1,<d(imm)>(%l) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
 			      EMIT_CONST_U64(func));
-		/* basr %r14,%w1 */
-		EMIT2(0x0d00, REG_14, REG_W1);
+		if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+			/* brasl %r14,__s390_indirect_jump_r1 */
+			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+		} else {
+			/* basr %r14,%w1 */
+			EMIT2(0x0d00, REG_14, REG_W1);
+		}
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 		if (bpf_helper_changes_skb_data((void *)func)) {
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index d007874..8f8cf94 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -27,21 +27,12 @@
 	return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
 }
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	u32 oparg = (encoded_op << 8) >> 20;
-	u32 cmparg = (encoded_op << 20) >> 20;
 	u32 oldval, newval, prev;
 	int ret;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	do {
@@ -80,17 +71,8 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
 
 	return ret;
 }
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index c001f78..28cc612 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -255,7 +255,7 @@
 	mov.l	@r8, r8
 	jsr	@r8
 	 nop
-	bra	__restore_all
+	bra	ret_from_exception
 	 nop
 	CFI_ENDPROC
 
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 24827a3..89d299c 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -82,7 +82,11 @@
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0..1cfd89d 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
 	: "r" (uaddr), "r" (oparg), "i" (-EFAULT)	\
 	: "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tem;
 
-	if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
-		return -EFAULT;
 	if (unlikely((((unsigned long) uaddr) & 0x3UL)))
 		return -EINVAL;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -69,17 +61,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b6802b9..81ad06a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -952,7 +952,7 @@
 			  pmd_t *pmd);
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp);
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index c56a195..b2722ed 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -219,17 +219,28 @@
 	}
 }
 
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old;
+
+	do {
+		old = *pmdp;
+	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+
+	return old;
+}
+
 /*
  * This routine is only called when splitting a THP
  */
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
-	pmd_t entry = *pmdp;
+	pmd_t old, entry;
 
-	pmd_val(entry) &= ~_PAGE_VALID;
-
-	set_pmd_at(vma->vm_mm, address, pmdp, entry);
+	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+	old = pmdp_establish(vma, address, pmdp, entry);
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 
 	/*
@@ -240,6 +251,8 @@
 	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
 	    !is_huge_zero_page(pmd_page(entry)))
 		(vma->vm_mm)->context.thp_pte_count--;
+
+	return old;
 }
 
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
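
pmdp_invalidate() now returns the pmd that was live before the invalidation, obtained atomically through the cmpxchg loop in pmdp_establish() above, instead of being a void helper. A hedged sketch of why callers want that return value (the real consumer is the generic THP-splitting code in mm/, which differs in detail):

	/* Sketch only: read dirty/young from the value that was actually
	 * present at invalidation time, with no window for a racing
	 * hardware A/D-bit update to be lost. */
	pmd_t old_pmd = pmdp_invalidate(vma, haddr, pmdp);

	if (pmd_dirty(old_pmd))
		SetPageDirty(page);
	if (pmd_young(old_pmd))
		SetPageReferenced(page);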
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index e64a1b7..83c1e63 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
 	lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int uninitialized_var(val), ret;
 
 	__futex_prolog();
@@ -119,12 +116,6 @@
 	/* The 32-bit futex code makes this assumption, so validate it here. */
 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -148,30 +139,9 @@
 	}
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (val == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (val != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (val < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (val >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (val <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (val > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = val;
+
 	return ret;
 }
 
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cc69e37..c0ad1bb 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -330,7 +330,8 @@
 	if (status != EFI_SUCCESS)
 		goto free_struct;
 
-	memcpy(rom->romdata, pci->romimage, pci->romsize);
+	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+	       pci->romsize);
 	return status;
 
 free_struct:
@@ -436,7 +437,8 @@
 	if (status != EFI_SUCCESS)
 		goto free_struct;
 
-	memcpy(rom->romdata, pci->romimage, pci->romsize);
+	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+	       pci->romsize);
 	return status;
 
 free_struct:
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 43c9575..308aac3 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -12,6 +12,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
@@ -59,7 +60,8 @@
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_PHYSICAL_ALIGN=0x1000000
 CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 reboot=p"
+CONFIG_CMDLINE="console=ttyS0 reboot=p nopti"
+CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set
@@ -92,8 +94,8 @@
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
 CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_TCP_CONG_ADVANCED=y
@@ -108,6 +110,7 @@
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NETLABEL=y
 CONFIG_NETFILTER=y
@@ -137,6 +140,7 @@
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -311,11 +315,11 @@
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
 CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
-# CONFIG_HID_GENERIC is not set
 CONFIG_HID_A4TECH=y
 CONFIG_HID_ACRUX=y
 CONFIG_HID_ACRUX_FF=y
@@ -379,6 +383,8 @@
 CONFIG_USB_DUMMY_HCD=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
@@ -403,6 +409,9 @@
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -436,11 +445,11 @@
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_LOCKUP_DETECTOR=y
 CONFIG_PANIC_TIMEOUT=5
-# CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_UPROBE_EVENT=y
 CONFIG_IO_DELAY_NONE=y
 CONFIG_DEBUG_BOOT_PARAMS=y
 CONFIG_OPTIMIZE_INLINING=y
@@ -452,3 +461,4 @@
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SHA512=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 34b3fa2..9e32d40 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
 				$(comma)4)$(comma)%ymm2,yes,no)
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f87563..2e14acc 100644
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
                                 $(comma)4)$(comma)%ymm2,yes,no)
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
index 41089e7..45b4fca 100644
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
                                 $(comma)4)$(comma)%ymm2,yes,no)
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index d540966..51a858e 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -171,7 +171,8 @@
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
-	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) \
+	$(filter --target=% --gcc-toolchain=%,$(KBUILD_CFLAGS))
 GCOV_PROFILE := n
 
 #
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f73796d..655a65e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -26,6 +26,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/device.h>
+#include <linux/nospec.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -303,17 +304,20 @@
 
 	config = attr->config;
 
-	cache_type = (config >>  0) & 0xff;
+	cache_type = (config >> 0) & 0xff;
 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 		return -EINVAL;
+	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
 
 	cache_op = (config >>  8) & 0xff;
 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 		return -EINVAL;
+	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
 
 	cache_result = (config >> 16) & 0xff;
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
+	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
 	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
 
@@ -420,6 +424,8 @@
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
 
+	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
 	/*
 	 * The generic map:
 	 */
@@ -1149,16 +1155,13 @@
 
 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
-	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
-	    local64_read(&hwc->prev_count) != (u64)-left) {
-		/*
-		 * The hw event starts counting from this event offset,
-		 * mark it to be able to extra future deltas:
-		 */
-		local64_set(&hwc->prev_count, (u64)-left);
+	/*
+	 * The hw event starts counting from this event offset,
+	 * mark it to be able to extra future deltas:
+	 */
+	local64_set(&hwc->prev_count, (u64)-left);
 
-		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
-	}
+	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
 	/*
 	 * Due to erratum on certan cpu we need
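
The array_index_nospec() calls added above follow the usual Spectre-v1 hardening pattern: the range check alone does not stop the CPU from speculatively indexing hw_cache_event_ids[] or the event map with an out-of-bounds value, so the index is additionally clamped with a branchless mask after the check. A self-contained illustration of the masking idea (userspace sketch of the generic C fallback; the kernel provides arch-optimized versions):

#include <stdio.h>

/* Simplified version of the generic array_index_mask_nospec(): evaluates to
 * ~0UL when 0 <= index < size and to 0UL otherwise, without a conditional
 * branch the CPU could mispredict past. Relies on arithmetic right shift of
 * a negative value, as the kernel's generic helper does. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long cache_type = 0x37;	/* attacker-influenced, out of range  */
	unsigned long max = 8;			/* stand-in for PERF_COUNT_HW_CACHE_MAX */

	unsigned long clamped = cache_type & index_mask_nospec(cache_type, max);
	printf("clamped index: %lu\n", clamped);	/* prints 0 */
	return 0;
}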
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6f353a8..8150393 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2066,16 +2066,23 @@
 	int bit, loops;
 	u64 status;
 	int handled;
+	int pmu_enabled;
 
 	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
+	 * Save the PMU state.
+	 * It needs to be restored when leaving the handler.
+	 */
+	pmu_enabled = cpuc->enabled;
+	/*
 	 * No known reason to not always do late ACK,
 	 * but just in case do it opt-in.
 	 */
 	if (!x86_pmu.late_ack)
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	intel_bts_disable_local();
+	cpuc->enabled = 0;
 	__intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	handled += intel_bts_interrupt();
@@ -2173,7 +2180,8 @@
 
 done:
 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
-	if (cpuc->enabled)
+	cpuc->enabled = pmu_enabled;
+	if (pmu_enabled)
 		__intel_pmu_enable_all(0, true);
 	intel_bts_enable_local();
 
@@ -3019,7 +3027,7 @@
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+static u64 bdw_limit_period(struct perf_event *event, u64 left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 1076c9a..47d526c 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -90,6 +90,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/perf_event.h>
+#include <linux/nospec.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include "../perf_event.h"
@@ -300,6 +301,7 @@
 	} else if (event->pmu == &cstate_pkg_pmu) {
 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
 			return -EINVAL;
+		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
 		if (!pkg_msr[cfg].attr)
 			return -EINVAL;
 		event->hw.event_base = pkg_msr[cfg].msr;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8e7a3f1..f26e26e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1110,6 +1110,7 @@
 	if (pebs == NULL)
 		return;
 
+	regs->flags &= ~PERF_EFLAGS_EXACT;
 	sample_type = event->attr.sample_type;
 	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
 
@@ -1154,7 +1155,6 @@
 	 */
 	*regs = *iregs;
 	regs->flags = pebs->flags;
-	set_linear_ip(regs, pebs->ip);
 
 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
 		regs->ax = pebs->ax;
@@ -1190,13 +1190,22 @@
 #endif
 	}
 
-	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
-		regs->ip = pebs->real_ip;
-		regs->flags |= PERF_EFLAGS_EXACT;
-	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
-		regs->flags |= PERF_EFLAGS_EXACT;
-	else
-		regs->flags &= ~PERF_EFLAGS_EXACT;
+	if (event->attr.precise_ip > 1) {
+		/* Haswell and later have the eventing IP, so use it: */
+		if (x86_pmu.intel_cap.pebs_format >= 2) {
+			set_linear_ip(regs, pebs->real_ip);
+			regs->flags |= PERF_EFLAGS_EXACT;
+		} else {
+			/* Otherwise use PEBS off-by-1 IP: */
+			set_linear_ip(regs, pebs->ip);
+
+			/* ... and try to fix it up using the LBR entries: */
+			if (intel_pmu_pebs_fixup_ip(regs))
+				regs->flags |= PERF_EFLAGS_EXACT;
+		}
+	} else
+		set_linear_ip(regs, pebs->ip);
+
 
 	if ((sample_type & PERF_SAMPLE_ADDR) &&
 	    x86_pmu.intel_cap.pebs_format >= 1)
@@ -1263,17 +1272,84 @@
 	return NULL;
 }
 
+/*
+ * Special variant of intel_pmu_save_and_restart() for auto-reload.
+ */
+static int
+intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int shift = 64 - x86_pmu.cntval_bits;
+	u64 period = hwc->sample_period;
+	u64 prev_raw_count, new_raw_count;
+	s64 new, old;
+
+	WARN_ON(!period);
+
+	/*
+	 * drain_pebs() only happens when the PMU is disabled.
+	 */
+	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
+
+	prev_raw_count = local64_read(&hwc->prev_count);
+	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+	local64_set(&hwc->prev_count, new_raw_count);
+
+	/*
+	 * Since the counter increments a negative counter value and
+	 * overflows on the sign switch, giving the interval:
+	 *
+	 *   [-period, 0]
+	 *
+	 * the difference between two consecutive reads is:
+	 *
+	 *   A) value2 - value1;
+	 *      when no overflows have happened in between,
+	 *
+	 *   B) (0 - value1) + (value2 - (-period));
+	 *      when one overflow happened in between,
+	 *
+	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
+	 *      when @n overflows happened in between.
+	 *
+	 * Here A) is the obvious difference, B) is the extension to the
+	 * discrete interval, where the first term is to the top of the
+	 * interval and the second term is from the bottom of the next
+	 * interval and C) the extension to multiple intervals, where the
+	 * middle term is the whole intervals covered.
+	 *
+	 * An equivalent of C, by reduction, is:
+	 *
+	 *   value2 - value1 + n * period
+	 */
+	new = ((s64)(new_raw_count << shift) >> shift);
+	old = ((s64)(prev_raw_count << shift) >> shift);
+	local64_add(new - old + count * period, &event->count);
+
+	perf_event_update_userpage(event);
+
+	return 0;
+}
+
 static void __intel_pmu_pebs_event(struct perf_event *event,
 				   struct pt_regs *iregs,
 				   void *base, void *top,
 				   int bit, int count)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	struct perf_sample_data data;
 	struct pt_regs regs;
 	void *at = get_next_pebs_record_by_bit(base, top, bit);
 
-	if (!intel_pmu_save_and_restart(event) &&
-	    !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
+	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+		/*
+		 * Now, auto-reload is only enabled in fixed period mode.
+		 * The reload value is always hwc->sample_period.
+		 * May need to change it, if auto-reload is enabled in
+		 * freq mode later.
+		 */
+		intel_pmu_save_and_restart_reload(event, count);
+	} else if (!intel_pmu_save_and_restart(event))
 		return;
 
 	while (count > 1) {
@@ -1325,8 +1401,11 @@
 		return;
 
 	n = top - at;
-	if (n <= 0)
+	if (n <= 0) {
+		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+			intel_pmu_save_and_restart_reload(event, 0);
 		return;
+	}
 
 	__intel_pmu_pebs_event(event, iregs, at, top, 0, n);
 }
@@ -1349,8 +1428,22 @@
 
 	ds->pebs_index = ds->pebs_buffer_base;
 
-	if (unlikely(base >= top))
+	if (unlikely(base >= top)) {
+		/*
+		 * drain_pebs() can be called twice in a short period
+		 * for an auto-reload event in pmu::read(), with no
+		 * overflow having happened in between.
+		 * intel_pmu_save_and_restart_reload() still needs to be
+		 * called to update event->count in that case.
+		 */
+		for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
+				 x86_pmu.max_pebs_events) {
+			event = cpuc->events[bit];
+			if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+				intel_pmu_save_and_restart_reload(event, 0);
+		}
 		return;
+	}
 
 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
 		struct pebs_record_nhm *p = at;
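
The long comment in intel_pmu_save_and_restart_reload() above reduces the auto-reload accounting to delta = value2 - value1 + n * period. A small numeric check of that formula (plain C; the values are invented purely for illustration):

#include <stdio.h>

int main(void)
{
	long long period   = 100;	/* hwc->sample_period (reload value)   */
	long long prev_raw = -40;	/* counter value at the previous read  */
	long long new_raw  = -90;	/* counter value read in drain_pebs()  */
	long long n        = 1;		/* overflows (PEBS records) in between */

	/* The counter counted 40 up to the overflow at 0, was reloaded to
	 * -100, then counted another 10 to reach -90: 50 events in total. */
	long long delta = new_raw - prev_raw + n * period;

	printf("delta = %lld\n", delta);	/* prints 50 */
	return 0;
}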
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4bb3ec6..be0b196 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/nospec.h>
 #include <asm/intel-family.h>
 
 enum perf_msr_id {
@@ -136,9 +137,6 @@
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
 
-	if (cfg >= PERF_MSR_EVENT_MAX)
-		return -EINVAL;
-
 	/* unsupported modes and filters */
 	if (event->attr.exclude_user   ||
 	    event->attr.exclude_kernel ||
@@ -149,6 +147,11 @@
 	    event->attr.sample_period) /* no sampling */
 		return -EINVAL;
 
+	if (cfg >= PERF_MSR_EVENT_MAX)
+		return -EINVAL;
+
+	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
+
 	if (!msr[cfg].attr)
 		return -EINVAL;
 
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index bcbb1d2..f356317 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -548,7 +548,7 @@
 	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
 	bool		late_ack;
-	unsigned	(*limit_period)(struct perf_event *event, unsigned l);
+	u64		(*limit_period)(struct perf_event *event, u64 l);
 
 	/*
 	 * sysfs attrs
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index a248531..c278f27 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -197,6 +197,9 @@
 #define X86_FEATURE_RETPOLINE	( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */
+
 #define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 
 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
@@ -204,6 +207,13 @@
 
 #define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW	( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD	( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS	( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
@@ -261,9 +271,10 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */
-#define X86_FEATURE_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
@@ -299,6 +310,7 @@
 #define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */
 #define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */
 
+
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
@@ -306,6 +318,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
@@ -335,5 +348,6 @@
 #define X86_BUG_CPU_MELTDOWN	X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
 #define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f54..f4dc9b6 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
 		       "+m" (*uaddr), "=&r" (tem)		\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret, tem;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 
 	switch (op) {
@@ -80,30 +71,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (oldval == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (oldval != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (oldval < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (oldval >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (oldval <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (oldval > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 39bcefc..bb07878 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -68,6 +68,11 @@
 extern struct legacy_pic *legacy_pic;
 extern struct legacy_pic null_legacy_pic;
 
+static inline bool has_legacy_pic(void)
+{
+	return legacy_pic != &null_legacy_pic;
+}
+
 static inline int nr_legacy_irqs(void)
 {
 	return legacy_pic->nr_legacy_irqs;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 20cfeeb..7598a6c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -864,7 +864,7 @@
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
-	bool (*cpu_has_high_real_mode_segbase)(void);
+	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	int (*vm_init)(struct kvm *kvm);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 5a295bb..7336508 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -113,7 +113,7 @@
 
 	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
-		/* pkey 0 is the default and always allocated */
+		/* pkey 0 is the default and allocated implicitly */
 		mm->context.pkey_allocation_map = 0x1;
 		/* -1 means unallocated or invalid */
 		mm->context.execute_only_pkey = -1;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c768bc1..1ec13e2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -40,6 +40,8 @@
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -61,6 +63,11 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SSB_NO			(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Speculative Store Bypass
+						    * control required.
+						    */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
@@ -135,6 +142,7 @@
 
 /* DEBUGCTLMSR bits (others vary by model): */
 #define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF_SHIFT		1
 #define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
 #define DEBUGCTLMSR_TR			(1UL <<  6)
 #define DEBUGCTLMSR_BTS			(1UL <<  7)
@@ -315,6 +323,8 @@
 #define MSR_AMD64_IBSOPDATA4		0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 
+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
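
With SPEC_CTRL_SSBD added above, MSR_IA32_SPEC_CTRL now carries three architectural control bits (IBRS, STIBP, SSBD). A trivial check of how the values compose (illustrative only):

#include <stdio.h>

#define SPEC_CTRL_IBRS		(1 << 0)
#define SPEC_CTRL_STIBP		(1 << 1)
#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1 << SPEC_CTRL_SSBD_SHIFT)

int main(void)
{
	/* e.g. a base value with SSBD forced on plus IBRS around firmware
	 * calls ends up writing 0x5 to MSR 0x48. */
	printf("SSBD        = 0x%x\n", SPEC_CTRL_SSBD);			/* 0x4 */
	printf("IBRS | SSBD = 0x%x\n", SPEC_CTRL_IBRS | SPEC_CTRL_SSBD);	/* 0x5 */
	return 0;
}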
 
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f928ad9..8b38df9 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,6 +217,14 @@
 	SPECTRE_V2_IBRS,
 };
 
+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+	SPEC_STORE_BYPASS_NONE,
+	SPEC_STORE_BYPASS_DISABLE,
+	SPEC_STORE_BYPASS_PRCTL,
+	SPEC_STORE_BYPASS_SECCOMP,
+};
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
@@ -241,22 +249,27 @@
 #endif
 }
 
-#define alternative_msr_write(_msr, _val, _feature)		\
-	asm volatile(ALTERNATIVE("",				\
-				 "movl %[msr], %%ecx\n\t"	\
-				 "movl %[val], %%eax\n\t"	\
-				 "movl $0, %%edx\n\t"		\
-				 "wrmsr",			\
-				 _feature)			\
-		     : : [msr] "i" (_msr), [val] "i" (_val)	\
-		     : "eax", "ecx", "edx", "memory")
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+		: : "c" (msr),
+		    "a" ((u32)val),
+		    "d" ((u32)(val >> 32)),
+		    [feature] "i" (feature)
+		: "memory");
+}
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
-			      X86_FEATURE_USE_IBPB);
+	u64 val = PRED_CMD_IBPB;
+
+	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -265,14 +278,18 @@
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
+									\
 	preempt_disable();						\
-	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,	\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
-	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,			\
+	u64 val = x86_spec_ctrl_base;					\
+									\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
 	preempt_enable();						\
 } while (0)
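
alternative_msr_write() is now a real inline function taking the full 64-bit MSR value, and the firmware_restrict_branch_speculation_{start,end}() pair builds on the cached x86_spec_ctrl_base instead of writing bare constants, so reserved and SSBD bits are preserved. A hedged sketch of the intended call pattern around a runtime firmware service (illustrative; some_efi_runtime_service() is hypothetical and the real callers live in the EFI glue code):

static long protected_firmware_call(void)
{
	long ret;

	firmware_restrict_branch_speculation_start();	/* SPEC_CTRL = base | IBRS   */
	ret = some_efi_runtime_service();		/* hypothetical firmware call */
	firmware_restrict_branch_speculation_end();	/* SPEC_CTRL = base, preempt on */

	return ret;
}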
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
new file mode 100644
index 0000000..7dc777a
--- /dev/null
+++ b/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED		0
+#define ORC_REG_PREV_SP			1
+#define ORC_REG_DX			2
+#define ORC_REG_DI			3
+#define ORC_REG_BP			4
+#define ORC_REG_SP			5
+#define ORC_REG_R10			6
+#define ORC_REG_R13			7
+#define ORC_REG_BP_INDIRECT		8
+#define ORC_REG_SP_INDIRECT		9
+#define ORC_REG_MAX			15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call).  Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct.  They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL			0
+#define ORC_TYPE_REGS			1
+#define ORC_TYPE_REGS_IRET		2
+#define UNWIND_HINT_TYPE_SAVE		3
+#define UNWIND_HINT_TYPE_RESTORE	4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard.  It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder.  It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address.  Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+	s16		sp_offset;
+	s16		bp_offset;
+	unsigned	sp_reg:4;
+	unsigned	bp_reg:4;
+	unsigned	type:2;
+};
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+	u32		ip;
+	s16		sp_offset;
+	u8		sp_reg;
+	u8		type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
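
orc_types.h above defines the per-instruction lookup record consumed by the ORC unwinder. For code inside a function that uses a conventional frame pointer, objtool typically records "previous SP is at %rbp + 16, previous BP is saved at prev_SP - 16". A hedged, hand-written example of such an entry (the field values are an illustration, not taken from a real vmlinux):

#include <asm/orc_types.h>	/* as added by this patch */

/* Example only: roughly what objtool would emit for an instruction in the
 * body of a frame-pointer-using C function. */
static const struct orc_entry example_orc = {
	.sp_reg    = ORC_REG_BP,	/* previous SP found relative to %rbp     */
	.sp_offset = 16,		/* ... at %rbp + 16 (saved rbp + retaddr) */
	.bp_reg    = ORC_REG_PREV_SP,	/* previous BP saved relative to prev SP  */
	.bp_offset = -16,		/* ... at prev_SP - 16                    */
	.type      = ORC_TYPE_CALL,
};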
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index b3b09b9..c50d6dc 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
+#define ARCH_DEFAULT_PKEY	0
+
 #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
@@ -14,7 +16,7 @@
 static inline int execute_only_pkey(struct mm_struct *mm)
 {
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
-		return 0;
+		return ARCH_DEFAULT_PKEY;
 
 	return __execute_only_pkey(mm);
 }
@@ -48,13 +50,21 @@
 {
 	/*
 	 * "Allocated" pkeys are those that have been returned
-	 * from pkey_alloc().  pkey 0 is special, and never
-	 * returned from pkey_alloc().
+	 * from pkey_alloc() or pkey 0 which is allocated
+	 * implicitly when the mm is created.
 	 */
-	if (pkey <= 0)
+	if (pkey < 0)
 		return false;
 	if (pkey >= arch_max_pkey())
 		return false;
+	/*
+	 * The exec-only pkey is set in the allocation map, but
+	 * is not available to any of the user interfaces like
+	 * mprotect_pkey().
+	 */
+	if (pkey == mm->context.execute_only_pkey)
+		return false;
+
 	return mm_pkey_allocation_map(mm) & (1U << pkey);
 }
 
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644
index 0000000..ae7c2c5
--- /dev/null
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SPECCTRL_H_
+#define _ASM_X86_SPECCTRL_H_
+
+#include <linux/thread_info.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
+
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+{
+	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+}
+
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+	speculative_store_bypass_update(current_thread_info()->flags);
+}
+
+#endif
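
ssbd_tif_to_spec_ctrl() above only shifts the per-task TIF_SSBD bit (bit 5, added in the thread_info.h hunk below) down into the SPEC_CTRL_SSBD position (bit 2); the BUILD_BUG_ON() guards the assumption that TIF_SSBD sits at or above the MSR bit. A quick numeric check (illustrative):

#include <stdio.h>

#define TIF_SSBD		5
#define _TIF_SSBD		(1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1UL << SPEC_CTRL_SSBD_SHIFT)

int main(void)
{
	unsigned long tifn = _TIF_SSBD;		/* task has SSBD requested */
	unsigned long msr  = (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);

	printf("0x%lx -> 0x%lx\n", tifn, msr);	/* 0x20 -> 0x4 == SPEC_CTRL_SSBD */
	return msr == SPEC_CTRL_SSBD ? 0 : 1;
}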
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 89978b9..2d8788a 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
+#define TIF_SSBD		5	/* Reduced data speculation */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
@@ -104,8 +105,9 @@
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_SSBD		(1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -139,7 +141,7 @@
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 99185a0..686a58d 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -111,6 +111,16 @@
 	}
 }
 
+static inline void cr4_toggle_bits(unsigned long mask)
+{
+	unsigned long cr4;
+
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	cr4 ^= mask;
+	this_cpu_write(cpu_tlbstate.cr4, cr4);
+	__write_cr4(cr4);
+}
+
 /* Read the CR4 shadow. */
 static inline unsigned long cr4_read_shadow(void)
 {
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
new file mode 100644
index 0000000..5e02b11
--- /dev/null
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -0,0 +1,103 @@
+#ifndef _ASM_X86_UNWIND_HINTS_H
+#define _ASM_X86_UNWIND_HINTS_H
+
+#include "orc_types.h"
+
+#ifdef __ASSEMBLY__
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest.  The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack.  Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros.  Most asm code falls in this
+ * category.  In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer.  Such
+ * code needs to be annotated such that objtool can understand it.  The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction.  Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary.  It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+#ifdef CONFIG_STACK_VALIDATION
+.Lunwind_hint_ip_\@:
+	.pushsection .discard.unwind_hints
+		/* struct unwind_hint */
+		.long .Lunwind_hint_ip_\@ - .
+		.short \sp_offset
+		.byte \sp_reg
+		.byte \type
+	.popsection
+#endif
+.endm
+
+.macro UNWIND_HINT_EMPTY
+	UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+.endm
+
+.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
+	.if \base == %rsp && \indirect
+		.set sp_reg, ORC_REG_SP_INDIRECT
+	.elseif \base == %rsp
+		.set sp_reg, ORC_REG_SP
+	.elseif \base == %rbp
+		.set sp_reg, ORC_REG_BP
+	.elseif \base == %rdi
+		.set sp_reg, ORC_REG_DI
+	.elseif \base == %rdx
+		.set sp_reg, ORC_REG_DX
+	.elseif \base == %r10
+		.set sp_reg, ORC_REG_R10
+	.else
+		.error "UNWIND_HINT_REGS: bad base register"
+	.endif
+
+	.set sp_offset, \offset
+
+	.if \iret
+		.set type, ORC_TYPE_REGS_IRET
+	.elseif \extra == 0
+		.set type, ORC_TYPE_REGS_IRET
+		.set sp_offset, \offset + (16*8)
+	.else
+		.set type, ORC_TYPE_REGS
+	.endif
+
+	UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type
+.endm
+
+.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0
+	UNWIND_HINT_REGS base=\base offset=\offset iret=1
+.endm
+
+.macro UNWIND_HINT_FUNC sp_offset=8
+	UNWIND_HINT sp_offset=\sp_offset
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT(sp_reg, sp_offset, type)			\
+	"987: \n\t"						\
+	".pushsection .discard.unwind_hints\n\t"		\
+	/* struct unwind_hint */				\
+	".long 987b - .\n\t"					\
+	".short " __stringify(sp_offset) "\n\t"		\
+	".byte " __stringify(sp_reg) "\n\t"			\
+	".byte " __stringify(type) "\n\t"			\
+	".popsection\n\t"
+
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_UNWIND_HINTS_H */
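
The non-assembly half of unwind_hints.h is meant for inline asm that temporarily breaks the normal stack layout: UNWIND_HINT_SAVE records the current unwind state for objtool and UNWIND_HINT_RESTORE restores it. A hedged sketch of the usage pattern (illustrative only; the pushed register and surrounding code are made up):

	/* Sketch only: snapshot the unwind state, do something objtool could
	 * not otherwise follow, then restore the snapshot. */
	asm volatile(UNWIND_HINT_SAVE
		     "push %%rbp\n\t"
		     /* ... code with a temporarily non-standard stack ... */
		     "pop %%rbp\n\t"
		     UNWIND_HINT_RESTORE
		     : : : "memory");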
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c..90ab9a7 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	__kernel_time_t msg_ctime;	/* last change time */
+	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
+	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
+	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	__kernel_ulong_t __unused4;
+	__kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc..644421f 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	__kernel_time_t		shm_ctime;	/* last change time */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
+	__kernel_ulong_t	__unused4;
+	__kernel_ulong_t	__unused5;
+};
+
+struct shminfo64 {
+	__kernel_ulong_t	shmmax;
+	__kernel_ulong_t	shmmin;
+	__kernel_ulong_t	shmmni;
+	__kernel_ulong_t	shmseg;
+	__kernel_ulong_t	shmall;
+	__kernel_ulong_t	__unused1;
+	__kernel_ulong_t	__unused2;
+	__kernel_ulong_t	__unused3;
+	__kernel_ulong_t	__unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d7..4c9c615 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,6 +29,7 @@
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o	:= y
 OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_test_nx.o			:= y
+OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o	:= y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 26b78d8..85a9e17 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
+
 obj-$(CONFIG_ACPI)		+= boot.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
 obj-$(CONFIG_ACPI_APEI)		+= apei.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index c6583ef..76cf21f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1403,7 +1403,7 @@
 	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
 	 */
 	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
-	if (!cpu && (pic_mode || !value)) {
+	if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
 		value = APIC_DM_EXTINT;
 		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
 	} else {
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c375bc6..4c2be99 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
@@ -542,6 +543,26 @@
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
 		nodes_per_socket = ((value >> 3) & 7) + 1;
 	}
+
+	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+		unsigned int bit;
+
+		switch (c->x86) {
+		case 0x15: bit = 54; break;
+		case 0x16: bit = 33; break;
+		case 0x17: bit = 10; break;
+		default: return;
+		}
+		/*
+		 * Try to cache the base value so further operations can
+		 * avoid RMW. If that faults, do not enable SSBD.
+		 */
+		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+			setup_force_cpu_cap(X86_FEATURE_SSBD);
+			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+		}
+	}
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
@@ -728,6 +749,17 @@
 	}
 }
 
+static void init_amd_zn(struct cpuinfo_x86 *c)
+{
+	set_cpu_cap(c, X86_FEATURE_ZEN);
+	/*
+	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+	 * all up to and including B1.
+	 */
+	if (c->x86_model <= 1 && c->x86_stepping <= 1)
+		set_cpu_cap(c, X86_FEATURE_CPB);
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
@@ -758,6 +790,7 @@
 	case 0x10: init_amd_gh(c); break;
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
+	case 0x17: init_amd_zn(c); break;
 	}
 
 	/* Enable workaround for FXSAVE leak */
@@ -824,8 +857,9 @@
 		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
 			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 
-	/* AMD CPUs don't reset SS attributes on SYSRET */
-	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+	if (!cpu_has(c, X86_FEATURE_XENPV))
+		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b8b0b6e..86af9b1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -11,8 +11,10 @@
 #include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
@@ -26,6 +28,27 @@
 #include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);
+
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+u64 __ro_after_init x86_amd_ls_cfg_base;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
 void __init check_bugs(void)
 {
@@ -36,9 +59,27 @@
 		print_cpu_info(&boot_cpu_data);
 	}
 
+	/*
+	 * Read the SPEC_CTRL MSR to account for reserved bits which may
+	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+	 * init code as it is not enumerated and depends on the family.
+	 */
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+	/* Allow STIBP in MSR_SPEC_CTRL if supported */
+	if (boot_cpu_has(X86_FEATURE_STIBP))
+		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
 	/* Select the proper spectre mitigation before patching alternatives */
 	spectre_v2_select_mitigation();
 
+	/*
+	 * Select proper mitigation for any exposure to the Speculative Store
+	 * Bypass vulnerability.
+	 */
+	ssb_select_mitigation();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -92,7 +133,76 @@
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+	SPECTRE_V2_NONE;
+
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+{
+	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	struct thread_info *ti = current_thread_info();
+
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/*
+		 * Restrict guest_spec_ctrl to supported values. Clear the
+		 * modifiable bits in the host base value and or the
+		 * modifiable bits from the guest value.
+		 */
+		guestval = hostval & ~x86_spec_ctrl_mask;
+		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+		/* SSBD controlled in MSR_SPEC_CTRL */
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+		if (hostval != guestval) {
+			msrval = setguest ? guestval : hostval;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+		}
+	}
+
+	/*
+	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
+	 */
+	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		return;
+
+	/*
+	 * If the host has SSBD mitigation enabled, force it in the host's
+	 * virtual MSR value. If it's not permanently enabled, evaluate
+	 * current's TIF_SSBD thread flag.
+	 */
+	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+		hostval = SPEC_CTRL_SSBD;
+	else
+		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+	/* Sanitize the guest value */
+	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+	if (hostval != guestval) {
+		unsigned long tif;
+
+		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+				 ssbd_spec_ctrl_to_tif(hostval);
+
+		speculative_store_bypass_update(tif);
+	}
+}
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+static void x86_amd_ssb_disable(void)
+{
+	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
 
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
@@ -311,32 +421,289 @@
 }
 
 #undef pr_fmt
+#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
+
+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+/* The kernel command line selection */
+enum ssb_mitigation_cmd {
+	SPEC_STORE_BYPASS_CMD_NONE,
+	SPEC_STORE_BYPASS_CMD_AUTO,
+	SPEC_STORE_BYPASS_CMD_ON,
+	SPEC_STORE_BYPASS_CMD_PRCTL,
+	SPEC_STORE_BYPASS_CMD_SECCOMP,
+};
+
+static const char *ssb_strings[] = {
+	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
+	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
+	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
+	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+};
+
+static const struct {
+	const char *option;
+	enum ssb_mitigation_cmd cmd;
+} ssb_mitigation_options[] = {
+	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
+	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
+	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
+	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
+	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+};
+
+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+{
+	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+	char arg[20];
+	int ret, i;
+
+	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+		return SPEC_STORE_BYPASS_CMD_NONE;
+	} else {
+		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
+					  arg, sizeof(arg));
+		if (ret < 0)
+			return SPEC_STORE_BYPASS_CMD_AUTO;
+
+		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
+			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
+				continue;
+
+			cmd = ssb_mitigation_options[i].cmd;
+			break;
+		}
+
+		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
+			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+			return SPEC_STORE_BYPASS_CMD_AUTO;
+		}
+	}
+
+	return cmd;
+}
+
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
+{
+	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+	enum ssb_mitigation_cmd cmd;
+
+	if (!boot_cpu_has(X86_FEATURE_SSBD))
+		return mode;
+
+	cmd = ssb_parse_cmdline();
+	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
+	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
+	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
+		return mode;
+
+	switch (cmd) {
+	case SPEC_STORE_BYPASS_CMD_AUTO:
+	case SPEC_STORE_BYPASS_CMD_SECCOMP:
+		/*
+		 * Choose prctl+seccomp as the default mode if seccomp is
+		 * enabled.
+		 */
+		if (IS_ENABLED(CONFIG_SECCOMP))
+			mode = SPEC_STORE_BYPASS_SECCOMP;
+		else
+			mode = SPEC_STORE_BYPASS_PRCTL;
+		break;
+	case SPEC_STORE_BYPASS_CMD_ON:
+		mode = SPEC_STORE_BYPASS_DISABLE;
+		break;
+	case SPEC_STORE_BYPASS_CMD_PRCTL:
+		mode = SPEC_STORE_BYPASS_PRCTL;
+		break;
+	case SPEC_STORE_BYPASS_CMD_NONE:
+		break;
+	}
+
+	/*
+	 * We have three CPU feature flags that are in play here:
+	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+	 */
+	if (mode == SPEC_STORE_BYPASS_DISABLE) {
+		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+		/*
+		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+		 * a completely different MSR and bit dependent on family.
+		 */
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+			break;
+		case X86_VENDOR_AMD:
+			x86_amd_ssb_disable();
+			break;
+		}
+	}
+
+	return mode;
+}
+
+static void ssb_select_mitigation(void)
+{
+	ssb_mode = __ssb_select_mitigation();
+
+	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+		pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Speculation prctl: " fmt
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+	bool update;
+
+	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+		return -ENXIO;
+
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		/* If speculation is force disabled, enable is not allowed */
+		if (task_spec_ssb_force_disable(task))
+			return -EPERM;
+		task_clear_spec_ssb_disable(task);
+		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	case PR_SPEC_DISABLE:
+		task_set_spec_ssb_disable(task);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	case PR_SPEC_FORCE_DISABLE:
+		task_set_spec_ssb_disable(task);
+		task_set_spec_ssb_force_disable(task);
+		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	/*
+	 * If being set on non-current task, delay setting the CPU
+	 * mitigation until it is next scheduled.
+	 */
+	if (task == current && update)
+		speculative_store_bypass_update_current();
+
+	return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+			     unsigned long ctrl)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssb_prctl_set(task, ctrl);
+	default:
+		return -ENODEV;
+	}
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
+static int ssb_prctl_get(struct task_struct *task)
+{
+	switch (ssb_mode) {
+	case SPEC_STORE_BYPASS_DISABLE:
+		return PR_SPEC_DISABLE;
+	case SPEC_STORE_BYPASS_SECCOMP:
+	case SPEC_STORE_BYPASS_PRCTL:
+		if (task_spec_ssb_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ssb_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+	default:
+		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+			return PR_SPEC_ENABLE;
+		return PR_SPEC_NOT_AFFECTED;
+	}
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssb_prctl_get(task);
+	default:
+		return -ENODEV;
+	}
+}
+
+void x86_spec_ctrl_setup_ap(void)
+{
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+		x86_amd_ssb_disable();
+}
 
 #ifdef CONFIG_SYSFS
+
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+			       char *buf, unsigned int bug)
+{
+	if (!boot_cpu_has_bug(bug))
+		return sprintf(buf, "Not affected\n");
+
+	switch (bug) {
+	case X86_BUG_CPU_MELTDOWN:
+		if (boot_cpu_has(X86_FEATURE_KAISER))
+			return sprintf(buf, "Mitigation: PTI\n");
+
+		break;
+
+	case X86_BUG_SPECTRE_V1:
+		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+	case X86_BUG_SPECTRE_V2:
+		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+			       spectre_v2_module_string());
+
+	case X86_BUG_SPEC_STORE_BYPASS:
+		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
+	default:
+		break;
+	}
+
+	return sprintf(buf, "Vulnerable\n");
+}
+
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
-		return sprintf(buf, "Not affected\n");
-	if (boot_cpu_has(X86_FEATURE_KAISER))
-		return sprintf(buf, "Mitigation: PTI\n");
-	return sprintf(buf, "Vulnerable\n");
+	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
 }
 
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
-		return sprintf(buf, "Not affected\n");
-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		return sprintf(buf, "Not affected\n");
+	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+}
 
-	return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-		       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
-		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-		       spectre_v2_module_string());
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
 }
 #endif
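
The prctl plumbing added above (ssb_prctl_set()/ssb_prctl_get() behind PR_SPEC_STORE_BYPASS) can be exercised from userspace. Below is a minimal sketch, not part of the patch; the fallback constant values are assumptions taken from the uapi additions that accompany this series, for builds whose headers predate them.

/* Query, then force-disable, Speculative Store Bypass for this task. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/prctl.h>

#ifndef PR_GET_SPECULATION_CTRL		/* assumed fallbacks for older headers */
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_PRCTL		(1UL << 0)
#define PR_SPEC_ENABLE		(1UL << 1)
#define PR_SPEC_DISABLE		(1UL << 2)
#define PR_SPEC_FORCE_DISABLE	(1UL << 3)
#endif

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		/* e.g. ENODEV: the kernel predates this interface */
		fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}

	printf("ssb prctl state: %#x (%s)\n", state,
	       (state & PR_SPEC_PRCTL) ? "per-task control available"
					: "no per-task control");

	/* Mirrors ssb_prctl_set(): fails with ENXIO unless ssb_mode is prctl/seccomp. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_FORCE_DISABLE, 0, 0))
		fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));

	return 0;
}
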
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 301bbd1..b0fd028 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -725,17 +725,32 @@
 	 * and they also have a different bit for STIBP support. Also,
 	 * a hypervisor might have set the individual AMD bits even on
 	 * Intel CPUs, for finer-grained selection of what's available.
-	 *
-	 * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
-	 * features, which are visible in /proc/cpuinfo and used by the
-	 * kernel. So set those accordingly from the Intel bits.
 	 */
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
+
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
+
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+
+	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+		set_cpu_cap(c, X86_FEATURE_STIBP);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -879,21 +894,55 @@
 	{}
 };
 
-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PINEVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_LINCROFT	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PENWELL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CLOVERVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CEDARVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
+	{ X86_VENDOR_CENTAUR,	5,					},
+	{ X86_VENDOR_INTEL,	5,					},
+	{ X86_VENDOR_NSC,	5,					},
+	{ X86_VENDOR_AMD,	0x12,					},
+	{ X86_VENDOR_AMD,	0x11,					},
+	{ X86_VENDOR_AMD,	0x10,					},
+	{ X86_VENDOR_AMD,	0xf,					},
+	{ X86_VENDOR_ANY,	4,					},
+	{}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (x86_match_cpu(cpu_no_meltdown))
-		return false;
-
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
+	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+	   !(ia32_cap & ARCH_CAP_SSB_NO))
+		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+	if (x86_match_cpu(cpu_no_speculation))
+		return;
+
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+	if (x86_match_cpu(cpu_no_meltdown))
+		return;
+
 	/* Rogue Data Cache Load? No! */
 	if (ia32_cap & ARCH_CAP_RDCL_NO)
-		return false;
+		return;
 
-	return true;
+	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
 /*
@@ -942,12 +991,7 @@
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-	if (!x86_match_cpu(cpu_no_speculation)) {
-		if (cpu_vulnerable_to_meltdown(c))
-			setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-		setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-	}
+	cpu_set_bug_bits(c);
 
 	fpu__init_system(c);
 
@@ -1315,6 +1359,7 @@
 #endif
 	mtrr_ap_init();
 	validate_apic_and_package_id(c);
+	x86_spec_ctrl_setup_ap();
 }
 
 struct msr_range {
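
cpu_set_bug_bits() above decides whether X86_BUG_SPEC_STORE_BYPASS gets set, and the sysfs attribute registered in bugs.c reports that decision as one of the ssb_strings[] entries. A small illustrative reader (not part of the patch), assuming the sysfs path listed in the ABI documentation:

/* Print the reported Speculative Store Bypass state for this machine. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f) {
		perror("spec_store_bypass");	/* older kernel or sysfs not mounted */
		return 1;
	}

	if (fgets(line, sizeof(line), f))
		/* e.g. "Not affected", "Vulnerable", or one of the mitigation strings */
		fputs(line, stdout);

	fclose(f);
	return 0;
}
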
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2584265..3b19d82 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,4 +46,7 @@
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+
+extern void x86_spec_ctrl_setup_ap(void);
+
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8fb1d65..93781e3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -153,7 +153,10 @@
 		setup_clear_cpu_cap(X86_FEATURE_IBPB);
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+		setup_clear_cpu_cap(X86_FEATURE_SSBD);
+		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 4bcd30c..79291d6 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -474,7 +474,6 @@
  */
 static void save_mc_for_early(u8 *mc)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	/* Synchronization during CPU hotplug. */
 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 
@@ -521,7 +520,6 @@
 
 out:
 	mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
 }
 
 static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 3fe45f8..7a07b15 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -11,6 +11,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
+#include <linux/libfdt.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
@@ -199,19 +200,22 @@
 static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nr_irqs, void *arg)
 {
-	struct of_phandle_args *irq_data = (void *)arg;
+	struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
 	struct of_ioapic_type *it;
 	struct irq_alloc_info tmp;
+	int type_index;
 
-	if (WARN_ON(irq_data->args_count < 2))
-		return -EINVAL;
-	if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
+	if (WARN_ON(fwspec->param_count < 2))
 		return -EINVAL;
 
-	it = &of_ioapic_type[irq_data->args[1]];
+	type_index = fwspec->param[1];
+	if (type_index >= ARRAY_SIZE(of_ioapic_type))
+		return -EINVAL;
+
+	it = &of_ioapic_type[type_index];
 	ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
 	tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
-	tmp.ioapic_pin = irq_data->args[0];
+	tmp.ioapic_pin = fwspec->param[0];
 
 	return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
 }
@@ -276,14 +280,15 @@
 
 	map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
 
-	initial_boot_params = dt = early_memremap(initial_dtb, map_len);
-	size = of_get_flat_dt_size();
+	dt = early_memremap(initial_dtb, map_len);
+	size = fdt_totalsize(dt);
 	if (map_len < size) {
 		early_memunmap(dt, map_len);
-		initial_boot_params = dt = early_memremap(initial_dtb, size);
+		dt = early_memremap(initial_dtb, size);
 		map_len = size;
 	}
 
+	early_init_dt_verify(dt);
 	unflatten_and_copy_device_tree();
 	early_memunmap(dt, map_len);
 }
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index fa671b9..1808a9c 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -28,6 +28,7 @@
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
+#include <linux/frame.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -91,6 +92,7 @@
 }
 
 asm (
+			"optprobe_template_func:\n"
 			".global optprobe_template_entry\n"
 			"optprobe_template_entry:\n"
 #ifdef CONFIG_X86_64
@@ -128,7 +130,12 @@
 			"	popf\n"
 #endif
 			".global optprobe_template_end\n"
-			"optprobe_template_end:\n");
+			"optprobe_template_end:\n"
+			".type optprobe_template_func, @function\n"
+			".size optprobe_template_func, .-optprobe_template_func\n");
+
+void optprobe_template_func(void);
+STACK_FRAME_NON_STANDARD(optprobe_template_func);
 
 #define TMPL_MOVE_IDX \
 	((long)&optprobe_template_val - (long)&optprobe_template_entry)
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 469b23d..fd7e993 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -71,12 +71,17 @@
 static void machine_kexec_free_page_tables(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pgd);
+	image->arch.pgd = NULL;
 #ifdef CONFIG_X86_PAE
 	free_page((unsigned long)image->arch.pmd0);
+	image->arch.pmd0 = NULL;
 	free_page((unsigned long)image->arch.pmd1);
+	image->arch.pmd1 = NULL;
 #endif
 	free_page((unsigned long)image->arch.pte0);
+	image->arch.pte0 = NULL;
 	free_page((unsigned long)image->arch.pte1);
+	image->arch.pte1 = NULL;
 }
 
 static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -93,7 +98,6 @@
 	    !image->arch.pmd0 || !image->arch.pmd1 ||
 #endif
 	    !image->arch.pte0 || !image->arch.pte1) {
-		machine_kexec_free_page_tables(image);
 		return -ENOMEM;
 	}
 	return 0;
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index a5784a1..eae59ca 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -37,8 +37,11 @@
 static void free_transition_pgtable(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pud);
+	image->arch.pud = NULL;
 	free_page((unsigned long)image->arch.pmd);
+	image->arch.pmd = NULL;
 	free_page((unsigned long)image->arch.pte);
+	image->arch.pte = NULL;
 }
 
 static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -79,7 +82,6 @@
 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
 	return 0;
 err:
-	free_transition_pgtable(image);
 	return result;
 }
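
Both machine_kexec hunks apply the same idiom: clear each page-table pointer immediately after freeing it and drop the error-path free, so the caller's unconditional cleanup stays safe to run more than once. A toy sketch of that idiom with invented names (not the kernel structures):

/* "NULL after free" keeps a shared cleanup routine idempotent. */
#include <stdlib.h>

struct pgtables {			/* hypothetical stand-in for image->arch */
	void *pud, *pmd, *pte;
};

static void pgtables_free(struct pgtables *p)
{
	free(p->pud); p->pud = NULL;	/* safe to free again later */
	free(p->pmd); p->pmd = NULL;
	free(p->pte); p->pte = NULL;
}

static int pgtables_alloc(struct pgtables *p)
{
	p->pud = calloc(1, 4096);
	p->pmd = calloc(1, 4096);
	p->pte = calloc(1, 4096);

	/* On failure just report it; the caller frees whatever was allocated. */
	if (!p->pud || !p->pmd || !p->pte)
		return -1;
	return 0;
}

int main(void)
{
	struct pgtables p = { 0 };
	int ret = pgtables_alloc(&p);

	pgtables_free(&p);		/* idempotent: a second call is harmless */
	pgtables_free(&p);
	return ret ? 1 : 0;
}
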
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 54b2711..e9195a1 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -33,6 +33,7 @@
 #include <asm/mce.h>
 #include <asm/vm86.h>
 #include <asm/switch_to.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -121,11 +122,6 @@
 	fpu__clear(&tsk->thread.fpu);
 }
 
-static void hard_disable_TSC(void)
-{
-	cr4_set_bits(X86_CR4_TSD);
-}
-
 void disable_TSC(void)
 {
 	preempt_disable();
@@ -134,15 +130,10 @@
 		 * Must flip the CPU state synchronously with
 		 * TIF_NOTSC in the current running context.
 		 */
-		hard_disable_TSC();
+		cr4_set_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
-static void hard_enable_TSC(void)
-{
-	cr4_clear_bits(X86_CR4_TSD);
-}
-
 static void enable_TSC(void)
 {
 	preempt_disable();
@@ -151,7 +142,7 @@
 		 * Must flip the CPU state synchronously with
 		 * TIF_NOTSC in the current running context.
 		 */
-		hard_enable_TSC();
+		cr4_clear_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
@@ -179,48 +170,199 @@
 	return 0;
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+static inline void switch_to_bitmap(struct tss_struct *tss,
+				    struct thread_struct *prev,
+				    struct thread_struct *next,
+				    unsigned long tifp, unsigned long tifn)
 {
-	struct thread_struct *prev, *next;
-
-	prev = &prev_p->thread;
-	next = &next_p->thread;
-
-	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
-	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
-		unsigned long debugctl = get_debugctlmsr();
-
-		debugctl &= ~DEBUGCTLMSR_BTF;
-		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
-			debugctl |= DEBUGCTLMSR_BTF;
-
-		update_debugctlmsr(debugctl);
-	}
-
-	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-		/* prev and next are different */
-		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-			hard_disable_TSC();
-		else
-			hard_enable_TSC();
-	}
-
-	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
 		 * Normally this is 128 bytes or less:
 		 */
 		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
 		       max(prev->io_bitmap_max, next->io_bitmap_max));
-	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+	} else if (tifp & _TIF_IO_BITMAP) {
 		/*
 		 * Clear any possible leftover bits:
 		 */
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
+}
+
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+	struct ssb_state	*shared_state;
+	raw_spinlock_t		lock;
+	unsigned int		disable_state;
+	unsigned long		local_state;
+};
+
+#define LSTATE_SSB	0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	st->local_state = 0;
+
+	/*
+	 * Shared state setup happens once on the first bringup
+	 * of the CPU. It's not destroyed on CPU hotunplug.
+	 */
+	if (st->shared_state)
+		return;
+
+	raw_spin_lock_init(&st->lock);
+
+	/*
+	 * Go over HT siblings and check whether one of them has set up the
+	 * shared state pointer already.
+	 */
+	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+		if (cpu == this_cpu)
+			continue;
+
+		if (!per_cpu(ssb_state, cpu).shared_state)
+			continue;
+
+		/* Link it to the state of the sibling: */
+		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+		return;
+	}
+
+	/*
+	 * First HT sibling to come up on the core.  Link shared state of
+	 * the first HT sibling to itself. The siblings on the same core
+	 * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+	 */
+	st->shared_state = st;
+}
+
+/*
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and the last sibling to disable it disables it for the whole core. This is how
+ * MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	u64 msr = x86_amd_ls_cfg_base;
+
+	if (!static_cpu_has(X86_FEATURE_ZEN)) {
+		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+		wrmsrl(MSR_AMD64_LS_CFG, msr);
+		return;
+	}
+
+	if (tifn & _TIF_SSBD) {
+		/*
+		 * Since this can race with prctl(), block reentry on the
+		 * same CPU.
+		 */
+		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		msr |= x86_amd_ls_cfg_ssbd_mask;
+
+		raw_spin_lock(&st->shared_state->lock);
+		/* First sibling enables SSBD: */
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		st->shared_state->disable_state++;
+		raw_spin_unlock(&st->shared_state->lock);
+	} else {
+		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		raw_spin_lock(&st->shared_state->lock);
+		st->shared_state->disable_state--;
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		raw_spin_unlock(&st->shared_state->lock);
+	}
+}
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+	wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+	/*
+	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+	 * so ssbd_tif_to_spec_ctrl() just works.
+	 */
+	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		amd_set_ssb_virt_state(tifn);
+	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		amd_set_core_ssb_state(tifn);
+	else
+		intel_set_ssb_state(tifn);
+}
+
+void speculative_store_bypass_update(unsigned long tif)
+{
+	preempt_disable();
+	__speculative_store_bypass_update(tif);
+	preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss)
+{
+	struct thread_struct *prev, *next;
+	unsigned long tifp, tifn;
+
+	prev = &prev_p->thread;
+	next = &next_p->thread;
+
+	tifn = READ_ONCE(task_thread_info(next_p)->flags);
+	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+	switch_to_bitmap(tss, prev, next, tifp, tifn);
+
 	propagate_user_return_notify(prev_p, next_p);
+
+	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
+	    arch_has_block_step()) {
+		unsigned long debugctl, msk;
+
+		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		msk = tifn & _TIF_BLOCKSTEP;
+		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+	}
+
+	if ((tifp ^ tifn) & _TIF_NOTSC)
+		cr4_toggle_bits(X86_CR4_TSD);
+
+	if ((tifp ^ tifn) & _TIF_SSBD)
+		__speculative_store_bypass_update(tifn);
 }
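
The core of amd_set_core_ssb_state() above is a per-core count: the first HT sibling to request SSBD turns the LS_CFG bit on, and the last one to drop it turns the bit off. A user-space model of just that counting logic, a sketch only (the per-CPU local_state guard, the spinlock and the real MSR write are left out):

/* Toy model of the per-core enable/disable counting for SSBD. */
#include <stdio.h>
#include <stdbool.h>

static int core_disable_count;		/* plays the role of shared_state->disable_state */
static bool core_ssbd_bit;		/* stands in for the SSBD bit in LS_CFG */

static void sibling_set_ssbd(int cpu, bool want_ssbd)
{
	if (want_ssbd) {
		if (core_disable_count++ == 0)	/* first sibling to enable */
			core_ssbd_bit = true;
	} else {
		if (--core_disable_count == 0)	/* last sibling to disable */
			core_ssbd_bit = false;
	}
	printf("cpu%d want=%d -> count=%d core_ssbd=%d\n",
	       cpu, want_ssbd, core_disable_count, core_ssbd_bit);
}

int main(void)
{
	sibling_set_ssbd(0, true);	/* core bit goes on */
	sibling_set_ssbd(1, true);	/* already on, only the count changes */
	sibling_set_ssbd(0, false);	/* sibling 1 still needs it */
	sibling_set_ssbd(1, false);	/* last one out: core bit goes off */
	return 0;
}
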
 
 /*
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 03f21db..4a12362 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/tboot.h>
 #include <linux/delay.h>
+#include <linux/frame.h>
 #include <acpi/reboot.h>
 #include <asm/io.h>
 #include <asm/apic.h>
@@ -127,6 +128,7 @@
 #ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(machine_real_restart);
 #endif
+STACK_FRAME_NON_STANDARD(machine_real_restart);
 
 /*
  * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e803d72..10b22fc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -75,6 +75,7 @@
 #include <asm/i8259.h>
 #include <asm/realmode.h>
 #include <asm/misc.h>
+#include <asm/spec-ctrl.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -229,6 +230,8 @@
 	 */
 	check_tsc_sync_target();
 
+	speculative_store_bypass_ht_init();
+
 	/*
 	 * Lock vector_lock and initialize the vectors on this cpu
 	 * before setting the cpu online. We must set it online with
@@ -1325,6 +1328,8 @@
 	set_mtrr_aps_delayed_init();
 
 	smp_quirk_init_udelay();
+
+	speculative_store_bypass_ht_init();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
@@ -1492,6 +1497,7 @@
 	cpumask_clear(topology_core_cpumask(cpu));
 	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
+	c->booted_cores = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();
 }
@@ -1591,6 +1597,8 @@
 	void *mwait_ptr;
 	int i;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index bbfb03ec..769c370 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -24,6 +24,7 @@
 #include <asm/geode.h>
 #include <asm/apic.h>
 #include <asm/intel-family.h>
+#include <asm/i8259.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -409,7 +410,7 @@
 	hpet2 -= hpet1;
 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
 	do_div(tmp, 1000000);
-	do_div(deltatsc, tmp);
+	deltatsc = div64_u64(deltatsc, tmp);
 
 	return (unsigned long) deltatsc;
 }
@@ -456,6 +457,20 @@
 	unsigned long tscmin, tscmax;
 	int pitcnt;
 
+	if (!has_legacy_pic()) {
+		/*
+		 * Relies on tsc_early_delay_calibrate() to have given us a
+		 * semi-usable udelay(); wait for the same 50ms we would have
+		 * waited with the PIT loop below.
+		 */
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		return ULONG_MAX;
+	}
+
 	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 
@@ -580,6 +595,9 @@
 	u64 tsc, delta;
 	unsigned long d1, d2;
 
+	if (!has_legacy_pic())
+		return 0;
+
 	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c7194e9..4ef267f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -353,6 +353,7 @@
 	/DISCARD/ : {
 		*(.eh_frame)
 		*(__func_stack_frame_non_standard)
+		*(__unreachable)
 	}
 }
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 93f924d..7e5119c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -357,7 +357,7 @@
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(IBPB) | F(IBRS);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -382,7 +382,7 @@
 
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
-		F(SPEC_CTRL) | F(ARCH_CAPABILITIES);
+		F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -468,6 +468,11 @@
 				entry->ecx &= ~F(PKU);
 			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
 			cpuid_mask(&entry->edx, CPUID_7_EDX);
+			/*
+			 * We emulate ARCH_CAPABILITIES in software even
+			 * if the host doesn't support it.
+			 */
+			entry->edx |= F(ARCH_CAPABILITIES);
 		} else {
 			entry->ebx = 0;
 			entry->ecx = 0;
@@ -618,13 +623,20 @@
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
-		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
-		if (boot_cpu_has(X86_FEATURE_IBPB))
-			entry->ebx |= F(IBPB);
-		if (boot_cpu_has(X86_FEATURE_IBRS))
-			entry->ebx |= F(IBRS);
+		/*
+		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+		 * hardware cpuid
+		 */
+		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+			entry->ebx |= F(AMD_IBPB);
+		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+			entry->ebx |= F(AMD_IBRS);
+		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
 	case 0x80000019:
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d1beb71..c383697 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -165,21 +165,21 @@
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && (best->ebx & bit(X86_FEATURE_IBPB)))
+	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
 		return true;
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
 }
 
-static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
 		return true;
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+	return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SSBD)));
 }
 
 static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
@@ -190,6 +190,15 @@
 	return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
 }
 
+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+	return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
+
 
 /*
  * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5c3d416f..a8a86be 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -299,8 +299,16 @@
 	if (!lapic_in_kernel(vcpu))
 		return;
 
+	/*
+	 * KVM's in-kernel IOAPIC emulates the 82093AA datasheet, which has no
+	 * EOI register. Some buggy OSes (e.g. Windows with the Hyper-V role)
+	 * disable EOI broadcast in the LAPIC without checking the IOAPIC
+	 * version first, so level-triggered interrupts never get EOIed in
+	 * the IOAPIC.
+	 */
 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
-	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
+	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
+	    !ioapic_in_kernel(vcpu->kvm))
 		v |= APIC_LVR_DIRECTED_EOI;
 	kvm_lapic_set_reg(apic, APIC_LVR, v);
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index aaa93b4..c4cd128 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 #include <linux/hashtable.h>
+#include <linux/frame.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -45,7 +46,7 @@
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
 #include <asm/microcode.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -185,6 +186,12 @@
 	} host;
 
 	u64 spec_ctrl;
+	/*
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate LS_CFG bits on the host to
+	 * perform speculative control.
+	 */
+	u64 virt_spec_ctrl;
 
 	u32 *msrpm;
 
@@ -1561,6 +1568,7 @@
 	u32 eax = 1;
 
 	svm->spec_ctrl = 0;
+	svm->virt_spec_ctrl = 0;
 
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -3545,11 +3553,18 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		msr_info->data = svm->spec_ctrl;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		msr_info->data = svm->virt_spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3643,7 +3658,7 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
@@ -3684,6 +3699,16 @@
 			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
@@ -4917,8 +4942,7 @@
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5012,6 +5036,18 @@
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+	loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+	loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 	 * SPEC_CTRL MSR it may have left it on; save the value and
@@ -5030,20 +5066,7 @@
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
-
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
-	loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
-	loadsegment(gs, svm->host.gs);
-#endif
-#endif
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	reload_tss(vcpu);
 
@@ -5089,6 +5112,7 @@
 
 	mark_all_clean(svm->vmcb);
 }
+STACK_FRAME_NON_STANDARD(svm_vcpu_run);
 
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
@@ -5145,7 +5169,7 @@
 	return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
 	return true;
 }
@@ -5462,7 +5486,7 @@
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ff2030f..ebceda2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/hrtimer.h>
+#include <linux/frame.h>
 #include <linux/nospec.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
@@ -50,7 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/microcode.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -2558,6 +2559,8 @@
 		return;
 	}
 
+	WARN_ON_ONCE(vmx->emulation_required);
+
 	if (kvm_exception_is_soft(nr)) {
 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
 			     vmx->vcpu.arch.event_exit_inst_len);
@@ -3020,7 +3023,7 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3137,11 +3140,11 @@
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibrs(vcpu))
+		    !guest_cpuid_has_spec_ctrl(vcpu))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
 			return 1;
 
 		vmx->spec_ctrl = data;
@@ -6430,12 +6433,12 @@
 			goto out;
 		}
 
-		if (err != EMULATE_DONE) {
-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-			vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-			vcpu->run->internal.ndata = 0;
-			return 0;
-		}
+		if (err != EMULATE_DONE)
+			goto emulation_error;
+
+		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
+		    vcpu->arch.exception.pending)
+			goto emulation_error;
 
 		if (vcpu->arch.halt_request) {
 			vcpu->arch.halt_request = 0;
@@ -6451,6 +6454,12 @@
 
 out:
 	return ret;
+
+emulation_error:
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+	return 0;
 }
 
 static int __grow_ple_window(int val)
@@ -8689,10 +8698,23 @@
 			);
 	}
 }
+STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-	return enable_unrestricted_guest || emulate_invalid_guest_state;
+	switch (index) {
+	case MSR_IA32_SMBASE:
+		/*
+		 * We cannot do SMM unless we can run the guest in big
+		 * real mode.
+		 */
+		return enable_unrestricted_guest || emulate_invalid_guest_state;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		/* This is AMD only.  */
+		return false;
+	default:
+		return true;
+	}
 }
 
 static bool vmx_mpx_supported(void)
@@ -8915,10 +8937,10 @@
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
+
 	asm(
 		/* Store host registers */
 		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -9054,8 +9076,7 @@
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
@@ -9118,6 +9139,7 @@
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 }
+STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
 {
@@ -11346,7 +11368,7 @@
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
-	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3aaaf30..4aa265a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1002,6 +1002,7 @@
 	MSR_IA32_MCG_CTL,
 	MSR_IA32_MCG_EXT_CTL,
 	MSR_IA32_SMBASE,
+	MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2664,7 +2665,7 @@
 		 * fringe case that is not enabled except via specific settings
 		 * of the module parameters.
 		 */
-		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
@@ -4130,13 +4131,14 @@
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_XEN_HVM_CONFIG: {
+		struct kvm_xen_hvm_config xhc;
 		r = -EFAULT;
-		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
-				   sizeof(struct kvm_xen_hvm_config)))
+		if (copy_from_user(&xhc, argp, sizeof(xhc)))
 			goto out;
 		r = -EINVAL;
-		if (kvm->arch.xen_hvm_config.flags)
+		if (xhc.flags)
 			goto out;
+		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
 		r = 0;
 		break;
 	}
@@ -4226,14 +4228,8 @@
 	num_msrs_to_save = j;
 
 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		switch (emulated_msrs[i]) {
-		case MSR_IA32_SMBASE:
-			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-				continue;
-			break;
-		default:
-			break;
-		}
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+			continue;
 
 		if (j < i)
 			emulated_msrs[j] = emulated_msrs[i];
@@ -7263,6 +7259,7 @@
 {
 	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
+	int cpuid_update_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
@@ -7294,8 +7291,10 @@
 	vcpu->arch.cr0 = sregs->cr0;
 
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
+	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
+				(X86_CR4_OSXSAVE | X86_CR4_PKE));
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
-	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+	if (cpuid_update_needed)
 		kvm_update_cpuid(vcpu);
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
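
The KVM_XEN_HVM_CONFIG hunk copies the user structure into a local variable, validates it, and only then commits it to kvm->arch.xen_hvm_config, so a rejected request can no longer leave partially written state behind. A generic sketch of that copy-validate-commit pattern with made-up names:

/* Never publish caller-supplied state before it has been validated. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

struct hv_config {			/* hypothetical ioctl payload */
	unsigned int flags;
	unsigned long long blob_addr;
};

static struct hv_config live_config;	/* state that must never be half-updated */

static int handle_set_config(const struct hv_config *user_buf)
{
	struct hv_config tmp;

	/* In the kernel this would be copy_from_user(); plain memcpy here. */
	memcpy(&tmp, user_buf, sizeof(tmp));

	if (tmp.flags)			/* validate the local copy first */
		return -EINVAL;		/* live_config is still untouched */

	live_config = tmp;		/* commit only after validation */
	return 0;
}

int main(void)
{
	struct hv_config bad  = { .flags = 1, .blob_addr = 0x1000 };
	struct hv_config good = { .flags = 0, .blob_addr = 0x2000 };

	printf("bad:  %d (live flags=%u)\n", handle_set_config(&bad), live_config.flags);
	printf("good: %d (live addr=%#llx)\n", handle_set_config(&good), live_config.blob_addr);
	return 0;
}
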
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index c815564..10ffa7e 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -13,14 +13,14 @@
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
 	pushq %rbx
-	pushq %rbp
+	pushq %r12
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
 	movl    4(%rdi), %ecx
 	movl    8(%rdi), %edx
 	movl    12(%rdi), %ebx
-	movl    20(%rdi), %ebp
+	movl    20(%rdi), %r12d
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
 1:	\op
@@ -29,10 +29,10 @@
 	movl    %ecx, 4(%r10)
 	movl    %edx, 8(%r10)
 	movl    %ebx, 12(%r10)
-	movl    %ebp, 20(%r10)
+	movl    %r12d, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq %rbp
+	popq %r12
 	popq %rbx
 	ret
 3:
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7df8e3a..d35d0e4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1014,8 +1014,7 @@
 	after_bootmem = 1;
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-			 PAGE_SIZE, KCORE_OTHER);
+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
 	mem_init_print_info(NULL);
 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 73dcb0e1..dcd6714 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -279,9 +279,11 @@
 
 	/*
 	 * The .rodata section needs to be read-only. Using the pfn
-	 * catches all aliases.
+	 * catches all aliases.  This also includes __ro_after_init,
+	 * so do not enforce until kernel_set_to_readonly is true.
 	 */
-	if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+	if (kernel_set_to_readonly &&
+	    within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
 		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index b97ef29..a3b63e5 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
@@ -577,6 +578,10 @@
 	    (mtrr != MTRR_TYPE_WRBACK))
 		return 0;
 
+	/* Bail out if we are on a populated non-leaf entry: */
+	if (pud_present(*pud) && !pud_huge(*pud))
+		return 0;
+
 	prot = pgprot_4k_2_large(prot);
 
 	set_pte((pte_t *)pud, pfn_pte(
@@ -605,6 +610,10 @@
 		return 0;
 	}
 
+	/* Bail out if we are on a populated non-leaf entry: */
+	if (pmd_present(*pmd) && !pmd_huge(*pmd))
+		return 0;
+
 	prot = pgprot_4k_2_large(prot);
 
 	set_pte((pte_t *)pmd, pfn_pte(
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index f88ce0e..0bbec04 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -95,26 +95,27 @@
 	 */
 	if (pkey != -1)
 		return pkey;
-	/*
-	 * Look for a protection-key-drive execute-only mapping
-	 * which is now being given permissions that are not
-	 * execute-only.  Move it back to the default pkey.
-	 */
-	if (vma_is_pkey_exec_only(vma) &&
-	    (prot & (PROT_READ|PROT_WRITE))) {
-		return 0;
-	}
+
 	/*
 	 * The mapping is execute-only.  Go try to get the
 	 * execute-only protection key.  If we fail to do that,
 	 * fall through as if we do not have execute-only
-	 * support.
+	 * support in this mm.
 	 */
 	if (prot == PROT_EXEC) {
 		pkey = execute_only_pkey(vma->vm_mm);
 		if (pkey > 0)
 			return pkey;
+	} else if (vma_is_pkey_exec_only(vma)) {
+		/*
+		 * Protections are *not* PROT_EXEC, but the mapping
+		 * is using the exec-only pkey.  This mapping was
+		 * PROT_EXEC and will no longer be.  Move back to
+		 * the default pkey.
+		 */
+		return ARCH_DEFAULT_PKEY;
 	}
+
 	/*
 	 * This is a vanilla, non-pkey mprotect (or we failed to
 	 * setup execute-only), inherit the pkey from the VMA we
diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
index 90568c3..fefb4b6 100644
--- a/arch/x86/net/Makefile
+++ b/arch/x86/net/Makefile
@@ -1,4 +1,6 @@
 #
 # Arch-specific network modules
 #
+OBJECT_FILES_NON_STANDARD_bpf_jit.o += y
+
 obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index 066619b..7a25502 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,4 +1,5 @@
 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
 
 obj-$(CONFIG_EFI) 		+= quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a6a198c..0504187 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
+
 # __restore_processor_state() restores %gs after S3 resume and so should not
 # itself be stack-protected
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 9f14bd3..74b516c 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -142,7 +142,7 @@
 #endif
 }
 
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
 
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 9634557..0cb1dd4 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -149,7 +149,7 @@
 	return 0;
 }
 
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
 
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index e47e527..4a54059 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,3 +1,6 @@
+OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2bea87c..e3a3f5a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,6 +75,7 @@
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/cpu.h>
+#include <asm/unwind_hints.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -1452,10 +1453,12 @@
 		 * GDT. The new GDT has  __KERNEL_CS with CS.L = 1
 		 * and we are jumping to reload it.
 		 */
-		asm volatile ("pushq %0\n"
+		asm volatile (UNWIND_HINT_SAVE
+			      "pushq %0\n"
 			      "leaq 1f(%%rip),%0\n"
 			      "pushq %0\n"
 			      "lretq\n"
+			      UNWIND_HINT_RESTORE
 			      "1:\n"
 			      : "=&r" (dummy) : "0" (__KERNEL_CS));
 
@@ -1977,10 +1980,8 @@
 
 static void xen_set_cpu_features(struct cpuinfo_x86 *c)
 {
-	if (xen_pv_domain()) {
-		clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+	if (xen_pv_domain())
 		set_cpu_cap(c, X86_FEATURE_XENPV);
-	}
 }
 
 static void xen_pin_vcpu(int cpu)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 418f1b8..c92f75f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1317,8 +1317,6 @@
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_all(0);
-
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
@@ -1336,8 +1334,6 @@
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb(0);
-
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 72bfc1c..5bfbc1c 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
 	: "r" (uaddr), "I" (-EFAULT), "r" (oparg)	\
 	: "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 #if !XCHAL_HAVE_S32C1I
 	return -ENOSYS;
@@ -89,19 +81,10 @@
 
 	pagefault_enable();
 
-	if (ret)
-		return ret;
+	if (!ret)
+		*oval = oldval;
 
-	switch (cmp) {
-	case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
-	case FUTEX_OP_CMP_NE: return (oldval != cmparg);
-	case FUTEX_OP_CMP_LT: return (oldval < cmparg);
-	case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
-	case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
-	case FUTEX_OP_CMP_GT: return (oldval > cmparg);
-	}
-
-	return -ENOSYS;
+	return ret;
 }
 
 static inline int
diff --git a/block/bio.c b/block/bio.c
index 4f93345..91b6462 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -565,6 +565,15 @@
 }
 EXPORT_SYMBOL(bio_phys_segments);
 
+static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
+{
+#ifdef CONFIG_PFK
+	dst->bi_crypt_key = src->bi_crypt_key;
+	dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#endif
+	dst->bi_dio_inode = src->bi_dio_inode;
+}
+
 /**
  * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
  * 	@bio: destination bio
@@ -589,7 +598,8 @@
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
-
+	bio->bi_dio_inode = bio_src->bi_dio_inode;
+	bio_clone_crypt_key(bio, bio_src);
 	bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index abde370..f44daa1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,9 +6,9 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
-
+#include <linux/pfk.h>
 #include <trace/events/block.h>
-
+
 #include "blk.h"
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -725,6 +725,11 @@
 	}
 }
 
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+	return (!pfk_allow_merge_bio(bio, nxt));
+}
+
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -752,6 +757,8 @@
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return 0;
 
+	if (crypto_not_mergeable(req->bio, next->bio))
+		return 0;
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	if (crypto_not_mergeable(rq->bio, bio))
+		return false;
 	return true;
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a..6dd2ca4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -425,7 +425,10 @@
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
+	if (q->last_merge) {
+		if (!elv_bio_merge_ok(q->last_merge, bio))
+			return ELEVATOR_NO_MERGE;
+
 		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
diff --git a/block/ioctl.c b/block/ioctl.c
index d4a78d0..c4555b1 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -564,8 +564,6 @@
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return put_ulong(arg, size >> 9);
-	case BLKGETSTPART:
-		return put_ulong(arg, bdev->bd_part->start_sect);
 	case BLKGETSIZE64:
 		return put_u64(arg, i_size_read(bdev->bd_inode));
 	case BLKTRACESTART:
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index 5a96563..8d56143 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -6,10 +6,11 @@
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts/clang/host/linux-x86/clang-4630689/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r328903/bin
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
 FILES="
 arch/x86/boot/bzImage
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.arm b/build.config.goldfish.arm
index 866da93..ff5646a 100644
--- a/build.config.goldfish.arm
+++ b/build.config.goldfish.arm
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.arm64 b/build.config.goldfish.arm64
index 9c963cf..4c896a6 100644
--- a/build.config.goldfish.arm64
+++ b/build.config.goldfish.arm64
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.mips b/build.config.goldfish.mips
index 8af53d2..9a14a44 100644
--- a/build.config.goldfish.mips
+++ b/build.config.goldfish.mips
@@ -9,3 +9,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.mips64 b/build.config.goldfish.mips64
index 2a33d36..6ad9759 100644
--- a/build.config.goldfish.mips64
+++ b/build.config.goldfish.mips64
@@ -9,3 +9,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.x86 b/build.config.goldfish.x86
index f86253f..2266c62 100644
--- a/build.config.goldfish.x86
+++ b/build.config.goldfish.x86
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64
index e173886..08c42c2 100644
--- a/build.config.goldfish.x86_64
+++ b/build.config.goldfish.x86_64
@@ -10,3 +10,4 @@
 vmlinux
 System.map
 "
+STOP_SHIP_TRACEPRINTK=1
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ca50eeb..b5953f1 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -157,16 +157,16 @@
 	void *private;
 	int err;
 
-	/* If caller uses non-allowed flag, return error. */
-	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
-		return -EINVAL;
-
 	if (sock->state == SS_CONNECTED)
 		return -EINVAL;
 
 	if (addr_len != sizeof(*sa))
 		return -EINVAL;
 
+	/* If caller uses non-allowed flag, return error. */
+	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+		return -EINVAL;
+
 	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
 	sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
 
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index f6a009d..52e5ea3 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -106,6 +106,7 @@
 		pr_devel("sinfo %u: Direct signer is key %x\n",
 			 sinfo->index, key_serial(key));
 		x509 = NULL;
+		sig = sinfo->sig;
 		goto matched;
 	}
 	if (PTR_ERR(key) != -ENOKEY)
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 942ddff..4bb5f93 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@
 	if (!drbg)
 		return;
 	kzfree(drbg->Vbuf);
+	drbg->Vbuf = NULL;
 	drbg->V = NULL;
 	kzfree(drbg->Cbuf);
+	drbg->Cbuf = NULL;
 	drbg->C = NULL;
 	kzfree(drbg->scratchpadbuf);
 	drbg->scratchpadbuf = NULL;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index eb76a4c..8ce203f 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -109,6 +109,7 @@
 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
 	if (cpumask_empty(tmp)) {
 		mutex_unlock(&round_robin_lock);
+		free_cpumask_var(tmp);
 		return;
 	}
 	for_each_cpu(cpu, tmp) {
@@ -126,6 +127,8 @@
 	mutex_unlock(&round_robin_lock);
 
 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+
+	free_cpumask_var(tmp);
 }
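
The acpi_pad hunk adds the missing free_cpumask_var() on the early-return path (and after the normal path), so the scratch cpumask allocated earlier in the function is no longer leaked. A generic sketch of keeping every exit path paired with the allocation, here via a single cleanup label (names are illustrative):

/* Every return path taken after a successful allocation must release it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int round_robin_step(int cpus, int bail_early)
{
	int ret = 0;
	unsigned char *tmp = calloc(cpus, 1);	/* like alloc_cpumask_var() */

	if (!tmp)
		return -ENOMEM;

	if (bail_early) {		/* the path that used to leak */
		ret = -EBUSY;
		goto out;
	}

	memset(tmp, 1, cpus);		/* pretend work on the scratch mask */
out:
	free(tmp);			/* one exit point, always paired with calloc() */
	return ret;
}

int main(void)
{
	printf("%d %d\n", round_robin_step(4, 1), round_robin_step(4, 0));
	return 0;
}
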
 
 static void exit_round_robin(unsigned int tsk_index)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 94e04c9..667dc5c 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2069,6 +2069,25 @@
 	return opregion;
 }
 
+static bool dmi_is_desktop(void)
+{
+	const char *chassis_type;
+
+	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+	if (!chassis_type)
+		return false;
+
+	if (!strcmp(chassis_type, "3") || /*  3: Desktop */
+	    !strcmp(chassis_type, "4") || /*  4: Low Profile Desktop */
+	    !strcmp(chassis_type, "5") || /*  5: Pizza Box */
+	    !strcmp(chassis_type, "6") || /*  6: Mini Tower */
+	    !strcmp(chassis_type, "7") || /*  7: Tower */
+	    !strcmp(chassis_type, "11"))  /* 11: Main Server Chassis */
+		return true;
+
+	return false;
+}
+
 int acpi_video_register(void)
 {
 	int ret = 0;
@@ -2089,8 +2108,12 @@
 	 * win8 ready (where we also prefer the native backlight driver, so
 	 * normally the acpi_video code should not register there anyways).
 	 */
-	if (only_lcd == -1)
-		only_lcd = acpi_osi_is_win8();
+	if (only_lcd == -1) {
+		if (dmi_is_desktop() && acpi_osi_is_win8())
+			only_lcd = true;
+		else
+			only_lcd = false;
+	}
 
 	dmi_check_system(video_dmi_table);
 
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 80fc0b9..f362841 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -204,6 +204,7 @@
 	u32 fixed_status;
 	u32 fixed_enable;
 	u32 i;
+	acpi_status status;
 
 	ACPI_FUNCTION_NAME(ev_fixed_event_detect);
 
@@ -211,8 +212,12 @@
 	 * Read the fixed feature status and enable registers, as all the cases
 	 * depend on their values. Ignore errors here.
 	 */
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+	status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+	status |=
+	    acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+	if (ACPI_FAILURE(status)) {
+		return (int_status);
+	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
 			  "Fixed Event Block: Enable %08X Status %08X\n",
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 5d59cfc..c5d6701 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -308,6 +308,14 @@
 		/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
 
 		status = AE_OK;
+	} else if (ACPI_FAILURE(status)) {
+
+		/* If return_object exists, delete it */
+
+		if (info->return_object) {
+			acpi_ut_remove_reference(info->return_object);
+			info->return_object = NULL;
+		}
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index bb01dea..9825780 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -161,7 +161,7 @@
 {
 	int ret;
 
-	if (ignore_ppc) {
+	if (ignore_ppc || !pr->performance) {
 		/*
 		 * Only when it is notification event, the _OST object
 		 * will be evaluated. Otherwise it is skipped.
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cf725d5..145dcf2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1422,6 +1422,8 @@
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
 	acpi_init_coherency(device);
+	/* Assume there are unmet deps until acpi_device_dep_initialize() runs */
+	device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1445,6 +1447,14 @@
 	}
 
 	acpi_init_device_object(device, handle, type, sta);
+	/*
+	 * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
+	 * that we can call acpi_bus_get_status() and use its quirk handling.
+	 * Note this must be done before the get power-/wakeup_dev-flags calls.
+	 */
+	if (type == ACPI_BUS_TYPE_DEVICE)
+		acpi_bus_get_status(device);
+
 	acpi_bus_get_power_flags(device);
 	acpi_bus_get_wakeup_device_flags(device);
 
@@ -1517,9 +1527,11 @@
 			return -ENODEV;
 
 		*type = ACPI_BUS_TYPE_DEVICE;
-		status = acpi_bus_get_status_handle(handle, sta);
-		if (ACPI_FAILURE(status))
-			*sta = 0;
+		/*
+		 * acpi_add_single_object updates this once we've an acpi_device
+		 * so that acpi_bus_get_status' quirk handling can be used.
+		 */
+		*sta = 0;
 		break;
 	case ACPI_TYPE_PROCESSOR:
 		*type = ACPI_BUS_TYPE_PROCESSOR;
@@ -1621,6 +1633,8 @@
 	acpi_status status;
 	int i;
 
+	adev->dep_unmet = 0;
+
 	if (!acpi_has_method(adev->handle, "_DEP"))
 		return;
 
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7394aac..93888cc 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@
 				    struct device_attribute *attr, char *buf)
 {
 	struct amba_device *dev = to_amba_device(_dev);
+	ssize_t len;
 
-	if (!dev->driver_override)
-		return 0;
-
-	return sprintf(buf, "%s\n", dev->driver_override);
+	device_lock(_dev);
+	len = sprintf(buf, "%s\n", dev->driver_override);
+	device_unlock(_dev);
+	return len;
 }
 
 static ssize_t driver_override_store(struct device *_dev,
@@ -81,7 +82,7 @@
 				     const char *buf, size_t count)
 {
 	struct amba_device *dev = to_amba_device(_dev);
-	char *driver_override, *old = dev->driver_override, *cp;
+	char *driver_override, *old, *cp;
 
 	/* We need to keep extra room for a newline */
 	if (count >= (PAGE_SIZE - 1))
@@ -95,12 +96,15 @@
 	if (cp)
 		*cp = '\0';
 
+	device_lock(_dev);
+	old = dev->driver_override;
 	if (strlen(driver_override)) {
 		dev->driver_override = driver_override;
 	} else {
 	       kfree(driver_override);
 	       dev->driver_override = NULL;
 	}
+	device_unlock(_dev);
 
 	kfree(old);
 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e7e4560..957eb3c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3001,6 +3001,14 @@
 			else
 				return_error = BR_DEAD_REPLY;
 			mutex_unlock(&context->context_mgr_node_lock);
+			if (target_node && target_proc == proc) {
+				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+						  proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
+				return_error_param = -EINVAL;
+				return_error_line = __LINE__;
+				goto err_invalid_target_handle;
+			}
 		}
 		if (!target_node) {
 			/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e08c09f..0e2c0ac 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4366,6 +4366,10 @@
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
 
+	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
+	   SD7SN6S256G and SD8SN8U256G */
+	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
+
 	/* devices which puke on READ_NATIVE_MAX */
 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4422,7 +4426,12 @@
 						ATA_HORKAGE_ZERO_AFTER_TRIM |
 						ATA_HORKAGE_NOLPM, },
 
+	/* Sandisk devices which are known to not handle LPM well */
+	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
+
 	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9babbc8..fb2c00f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4156,7 +4156,7 @@
 #ifdef ATA_DEBUG
 	struct scsi_device *scsidev = cmd->device;
 
-	DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
+	DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
 		ap->print_id,
 		scsidev->channel, scsidev->id, scsidev->lun,
 		cmd->cmnd);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d3dc954..81bfeec 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
 #include <linux/bitops.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include <asm/byteorder.h>
 #include <asm/string.h>
 #include <asm/io.h>
@@ -1458,6 +1459,8 @@
 					return -EFAULT;
 				if (pool < 0 || pool > ZATM_LAST_POOL)
 					return -EINVAL;
+				pool = array_index_nospec(pool,
+							  ZATM_LAST_POOL + 1);
 				spin_lock_irqsave(&zatm_dev->lock, flags);
 				info = zatm_dev->pool_info[pool];
 				if (cmd == ZATM_GETPOOLZ) {
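
The zatm.c hunk applies the standard Spectre-v1 hardening: after the range check, array_index_nospec() clamps the index so it cannot be used speculatively out of bounds before the comparison retires. A small illustrative sketch with an invented table (not the zatm pool code):

#include <linux/errno.h>
#include <linux/nospec.h>

#define EXAMPLE_LAST_IDX	7

static int example_table[EXAMPLE_LAST_IDX + 1];

static int example_lookup(int idx)
{
	if (idx < 0 || idx > EXAMPLE_LAST_IDX)
		return -EINVAL;
	/* Clamp idx to [0, EXAMPLE_LAST_IDX] even under speculation. */
	idx = array_index_nospec(idx, EXAMPLE_LAST_IDX + 1);
	return example_table[idx];
}

The clamp only helps when it sits between the bounds check and the array access, which is why the patch inserts it immediately after the pool range test.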
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ac43d6f..4272868 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -876,8 +876,8 @@
 	struct device_node *of_node = dev_of_node(dev);
 	int error;
 
-	if (of_node) {
-		error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
+	if (of_node && of_node_kobj(of_node)) {
+		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
 		if (error)
 			dev_warn(dev, "Error %d creating of_node link\n",error);
 		/* An error here doesn't warrant bringing down the device */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4fe86f7..f9e1010 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -610,14 +610,22 @@
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
 	&dev_attr_spectre_v1.attr,
 	&dev_attr_spectre_v2.attr,
+	&dev_attr_spec_store_bypass.attr,
 	NULL
 };
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f74f3ca..18228ba 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -98,7 +98,7 @@
 	int ret;
 	unsigned int val;
 
-	if (map->cache == REGCACHE_NONE)
+	if (map->cache_type == REGCACHE_NONE)
 		return false;
 
 	if (!map->cache_ops)
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 9336236..8474a1b 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -230,6 +230,8 @@
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	int ret;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&pcd_mutex);
 	ret = cdrom_open(&cd->info, bdev, mode);
 	mutex_unlock(&pcd_mutex);
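
This pcd.c hunk, the gdrom.c hunk further down, and the removal in cdrom.c together move the media-change check from cdrom_open() into each block driver's .open(). A hedged sketch of the resulting open routine, with the driver names invented:

#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static int example_bdops_open(struct block_device *bdev, fmode_t mode)
{
	struct cdrom_device_info *cdi = bdev->bd_disk->private_data;
	int ret;

	/* open is the event-synchronization point: check media first */
	check_disk_change(bdev);

	mutex_lock(&example_mutex);
	ret = cdrom_open(cdi, bdev, mode);
	mutex_unlock(&example_mutex);
	return ret;
}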
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f8ba5c7..bff67c5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -217,6 +217,7 @@
 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -249,7 +250,6 @@
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
 	/* QCA ROME chipset */
-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
@@ -345,6 +345,9 @@
 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8723BU Bluetooth devices */
+	{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
+
 	/* Additional Realtek 8821AE Bluetooth devices */
 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
@@ -352,6 +355,9 @@
 	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8822BE Bluetooth devices */
+	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+
 	/* Silicon Wave based devices */
 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 5d475b3..07b77fb 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1154,9 +1154,6 @@
 
 	cd_dbg(CD_OPEN, "entering cdrom_open\n");
 
-	/* open is event synchronization point, check events first */
-	check_disk_change(bdev);
-
 	/* if this was a O_NONBLOCK open and we should honor the flags,
 	 * do a quick open without drive/disc integrity checks. */
 	cdi->use_count++;
@@ -2368,7 +2365,7 @@
 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
 		return media_changed(cdi, 1);
 
-	if ((unsigned int)arg >= cdi->capacity)
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 584bc31..e2808fe 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -497,6 +497,9 @@
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
 {
 	int ret;
+
+	check_disk_change(bdev);
+
 	mutex_lock(&gdrom_mutex);
 	ret = cdrom_open(gd.cd_info, bdev, mode);
 	mutex_unlock(&gdrom_mutex);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 4d734bf..d52c80c 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1567,9 +1567,18 @@
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
 
-		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart)
-			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
-			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
+		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					rpra[i].buf.len,
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
+					uint64_to_ptr(rpra[i].buf.pv
+						+ rpra[i].buf.len));
+		}
 	}
 	PERF_END);
 	for (i = bufs; rpra && i < bufs + handles; i++) {
@@ -1578,11 +1587,6 @@
 		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
 	}
 
-	if (!ctx->fl->sctx->smmu.coherent) {
-		PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
-		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
-		PERF_END);
-	}
  bail:
 	return err;
 }
@@ -1671,14 +1675,33 @@
 		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
 				buf_page_start(rpra[i].buf.pv))
 			continue;
-		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
-			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
-				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
+		if (!IS_CACHE_ALIGNED((uintptr_t)
+				uint64_to_ptr(rpra[i].buf.pv))) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+					map->handle,
+					uint64_to_ptr(rpra[i].buf.pv),
+					sizeof(uintptr_t),
+					ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range(
+					uint64_to_ptr(rpra[i].buf.pv), (char *)
+					uint64_to_ptr(rpra[i].buf.pv + 1));
+		}
+
 		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
 							rpra[i].buf.len);
-		if (!IS_CACHE_ALIGNED(end))
-			dmac_flush_range((char *)end,
-				(char *)end + 1);
+		if (!IS_CACHE_ALIGNED(end)) {
+			if (map && map->handle)
+				msm_ion_do_cache_op(ctx->fl->apps->client,
+						map->handle,
+						uint64_to_ptr(end),
+						sizeof(uintptr_t),
+						ION_IOC_CLEAN_INV_CACHES);
+			else
+				dmac_flush_range((char *)end,
+					(char *)end + 1);
+		}
 	}
 }
 
@@ -1687,7 +1710,6 @@
 	int i, inbufs, outbufs;
 	uint32_t sc = ctx->sc;
 	remote_arg64_t *rpra = ctx->rpra;
-	int used = ctx->used;
 
 	inbufs = REMOTE_SCALARS_INBUFS(sc);
 	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
@@ -1718,8 +1740,6 @@
 						 + rpra[i].buf.len));
 	}
 
-	if (rpra)
-		dmac_inv_range(rpra, (char *)rpra + used);
 }
 
 static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
@@ -2764,6 +2784,7 @@
 	mutex_unlock(&fl->perf_mutex);
 	mutex_destroy(&fl->perf_mutex);
 	mutex_destroy(&fl->fl_map_mutex);
+	mutex_destroy(&fl->map_mutex);
 	kfree(fl);
 	return 0;
 }
@@ -2777,7 +2798,6 @@
 			pm_qos_remove_request(&fl->pm_qos_req);
 		if (fl->debugfs_file != NULL)
 			debugfs_remove(fl->debugfs_file);
-		mutex_destroy(&fl->map_mutex);
 		fastrpc_file_free(fl);
 		file->private_data = NULL;
 	}
@@ -3259,6 +3279,28 @@
 		if (err)
 			goto bail;
 		break;
+	case FASTRPC_IOCTL_MMAP_64:
+		K_COPY_FROM_USER(err, 0, &p.mmap, param,
+						sizeof(p.mmap));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+		if (err)
+			goto bail;
+		K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+		if (err)
+			goto bail;
+		break;
+	case FASTRPC_IOCTL_MUNMAP_64:
+		K_COPY_FROM_USER(err, 0, &p.munmap, param,
+						sizeof(p.munmap));
+		if (err)
+			goto bail;
+		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+							&p.munmap)));
+		if (err)
+			goto bail;
+		break;
 	case FASTRPC_IOCTL_MUNMAP_FD:
 		K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
 			sizeof(p.munmap_fd));
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 0f07483..804ceda 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,10 @@
 		_IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc)
 #define COMPAT_FASTRPC_IOCTL_CONTROL \
 		_IOWR('R', 12, struct compat_fastrpc_ioctl_control)
+#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
+		_IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
+		_IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64)
 
 struct compat_remote_buf {
 	compat_uptr_t pv;	/* buffer pointer */
@@ -82,11 +86,24 @@
 	compat_uptr_t vaddrout;	/* dsps virtual address */
 };
 
+struct compat_fastrpc_ioctl_mmap_64 {
+	compat_int_t fd;	/* ion fd */
+	compat_uint_t flags;	/* flags for dsp to map with */
+	compat_u64 vaddrin;	/* optional virtual address */
+	compat_size_t size;	/* size */
+	compat_u64 vaddrout;	/* dsps virtual address */
+};
+
 struct compat_fastrpc_ioctl_munmap {
 	compat_uptr_t vaddrout;	/* address to unmap */
 	compat_size_t size;	/* size */
 };
 
+struct compat_fastrpc_ioctl_munmap_64 {
+	compat_u64 vaddrout;	/* address to unmap */
+	compat_size_t size;	/* size */
+};
+
 struct compat_fastrpc_ioctl_init {
 	compat_uint_t flags;	/* one of FASTRPC_INIT_* macros */
 	compat_uptr_t file;	/* pointer to elf file */
@@ -206,6 +223,28 @@
 	return err;
 }
 
+static int compat_get_fastrpc_ioctl_mmap_64(
+			struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+			struct fastrpc_ioctl_mmap __user *map)
+{
+	compat_uint_t u;
+	compat_int_t i;
+	compat_size_t s;
+	compat_u64 p;
+	int err;
+
+	err = get_user(i, &map32->fd);
+	err |= put_user(i, &map->fd);
+	err |= get_user(u, &map32->flags);
+	err |= put_user(u, &map->flags);
+	err |= get_user(p, &map32->vaddrin);
+	err |= put_user(p, &map->vaddrin);
+	err |= get_user(s, &map32->size);
+	err |= put_user(s, &map->size);
+
+	return err;
+}
+
 static int compat_put_fastrpc_ioctl_mmap(
 			struct compat_fastrpc_ioctl_mmap __user *map32,
 			struct fastrpc_ioctl_mmap __user *map)
@@ -219,6 +258,19 @@
 	return err;
 }
 
+static int compat_put_fastrpc_ioctl_mmap_64(
+			struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+			struct fastrpc_ioctl_mmap __user *map)
+{
+	compat_u64 p;
+	int err;
+
+	err = get_user(p, &map->vaddrout);
+	err |= put_user(p, &map32->vaddrout);
+
+	return err;
+}
+
 static int compat_get_fastrpc_ioctl_munmap(
 			struct compat_fastrpc_ioctl_munmap __user *unmap32,
 			struct fastrpc_ioctl_munmap __user *unmap)
@@ -235,6 +287,22 @@
 	return err;
 }
 
+static int compat_get_fastrpc_ioctl_munmap_64(
+			struct compat_fastrpc_ioctl_munmap_64 __user *unmap32,
+			struct fastrpc_ioctl_munmap __user *unmap)
+{
+	compat_u64 p;
+	compat_size_t s;
+	int err;
+
+	err = get_user(p, &unmap32->vaddrout);
+	err |= put_user(p, &unmap->vaddrout);
+	err |= get_user(s, &unmap32->size);
+	err |= put_user(s, &unmap->size);
+
+	return err;
+}
+
 static int compat_get_fastrpc_ioctl_perf(
 			struct compat_fastrpc_ioctl_perf __user *perf32,
 			struct fastrpc_ioctl_perf __user *perf)
@@ -355,6 +423,27 @@
 		VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map));
 		return err;
 	}
+	case COMPAT_FASTRPC_IOCTL_MMAP_64:
+	{
+		struct compat_fastrpc_ioctl_mmap_64  __user *map32;
+		struct fastrpc_ioctl_mmap __user *map;
+		long ret;
+
+		map32 = compat_ptr(arg);
+		VERIFY(err, NULL != (map = compat_alloc_user_space(
+							sizeof(*map))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map));
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP_64,
+							(unsigned long)map);
+		if (ret)
+			return ret;
+		VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map));
+		return err;
+	}
 	case COMPAT_FASTRPC_IOCTL_MUNMAP:
 	{
 		struct compat_fastrpc_ioctl_munmap __user *unmap32;
@@ -372,6 +461,23 @@
 		return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP,
 							(unsigned long)unmap);
 	}
+	case COMPAT_FASTRPC_IOCTL_MUNMAP_64:
+	{
+		struct compat_fastrpc_ioctl_munmap_64 __user *unmap32;
+		struct fastrpc_ioctl_munmap __user *unmap;
+
+		unmap32 = compat_ptr(arg);
+		VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+							sizeof(*unmap))));
+		if (err)
+			return -EFAULT;
+		VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32,
+							unmap));
+		if (err)
+			return err;
+		return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP_64,
+							(unsigned long)unmap);
+	}
 	case COMPAT_FASTRPC_IOCTL_INIT:
 		/* fall through */
 	case COMPAT_FASTRPC_IOCTL_INIT_ATTRS:
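
The new MMAP_64/MUNMAP_64 handlers follow the usual compat-ioctl recipe in this driver: allocate a native structure with compat_alloc_user_space(), widen each 32-bit field with get_user()/put_user(), hand the native command to unlocked_ioctl(), then copy any output field back. A stripped-down sketch of that recipe, with the structures and command number invented for illustration:

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

struct example_req {			/* native (64-bit) layout */
	int fd;
	u64 vaddr;
	size_t size;
};

struct compat_example_req {		/* 32-bit user-space layout */
	compat_int_t fd;
	compat_u64 vaddr;
	compat_size_t size;
};

#define EXAMPLE_IOCTL_REQ	_IOWR('R', 0x7f, struct example_req)

static long example_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct compat_example_req __user *req32 = compat_ptr(arg);
	struct example_req __user *req;
	compat_int_t fd;
	compat_u64 vaddr;
	compat_size_t size;
	int err;

	req = compat_alloc_user_space(sizeof(*req));
	if (!req)
		return -EFAULT;

	/* Widen each field from the 32-bit layout into the native one. */
	err = get_user(fd, &req32->fd);
	err |= put_user(fd, &req->fd);
	err |= get_user(vaddr, &req32->vaddr);
	err |= put_user(vaddr, &req->vaddr);
	err |= get_user(size, &req32->size);
	err |= put_user(size, &req->size);
	if (err)
		return -EFAULT;

	return filp->f_op->unlocked_ioctl(filp, EXAMPLE_IOCTL_REQ,
					  (unsigned long)req);
}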
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index de0dd01..952b87c 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -19,6 +19,8 @@
 #define FASTRPC_IOCTL_INVOKE	_IOWR('R', 1, struct fastrpc_ioctl_invoke)
 #define FASTRPC_IOCTL_MMAP	_IOWR('R', 2, struct fastrpc_ioctl_mmap)
 #define FASTRPC_IOCTL_MUNMAP	_IOWR('R', 3, struct fastrpc_ioctl_munmap)
+#define FASTRPC_IOCTL_MMAP_64	_IOWR('R', 14, struct fastrpc_ioctl_mmap_64)
+#define FASTRPC_IOCTL_MUNMAP_64	_IOWR('R', 15, struct fastrpc_ioctl_munmap_64)
 #define FASTRPC_IOCTL_INVOKE_FD	_IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
 #define FASTRPC_IOCTL_SETMODE	_IOWR('R', 5, uint32_t)
 #define FASTRPC_IOCTL_INIT	_IOWR('R', 6, struct fastrpc_ioctl_init)
@@ -204,6 +206,11 @@
 	size_t size;		/* size */
 };
 
+struct fastrpc_ioctl_munmap_64 {
+	uint64_t vaddrout;	/* address to unmap */
+	size_t size;		/* size */
+};
+
 struct fastrpc_ioctl_mmap {
 	int fd;				/* ion fd */
 	uint32_t flags;			/* flags for dsp to map with */
@@ -212,6 +219,14 @@
 	uintptr_t vaddrout;		/* dsps virtual address */
 };
 
+struct fastrpc_ioctl_mmap_64 {
+	int fd;				/* ion fd */
+	uint32_t flags;			/* flags for dsp to map with */
+	uint64_t vaddrin;		/* optional virtual address */
+	size_t size;			/* size */
+	uint64_t vaddrout;		/* dsps virtual address */
+};
+
 struct fastrpc_ioctl_munmap_fd {
 	int     fd;				/* fd */
 	uint32_t  flags;		/* control flags */
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 286418f..a089e7c 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2713,7 +2713,7 @@
 	}
 }
 
-static int diag_dci_init_remote(void)
+int diag_dci_init_remote(void)
 {
 	int i;
 	struct dci_ops_tbl_t *temp = NULL;
@@ -2740,11 +2740,6 @@
 
 	return 0;
 }
-#else
-static int diag_dci_init_remote(void)
-{
-	return 0;
-}
 #endif
 
 static int diag_dci_init_ops_tbl(void)
@@ -2754,10 +2749,6 @@
 	err = diag_dci_init_local();
 	if (err)
 		goto err;
-	err = diag_dci_init_remote();
-	if (err)
-		goto err;
-
 	return 0;
 
 err:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 61eb3f5..2fb0e3f 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -323,6 +323,7 @@
 int diag_dci_write_bridge(int token, unsigned char *buf, int len);
 int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
 int diag_dci_send_handshake_pkt(int index);
+int diag_dci_init_remote(void);
 #endif
 
 #endif
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
index b9958a4..4b8dd1b 100644
--- a/drivers/char/diag/diag_ipc_logging.h
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 #define DIAG_DEBUG_MASKS	0x0010
 #define DIAG_DEBUG_POWER	0x0020
 #define DIAG_DEBUG_BRIDGE	0x0040
+#define DIAG_DEBUG_CONTROL	0x0080
 
 #define DIAG_DEBUG
 
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 55b1b49..c00fbfc 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -37,6 +37,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_MUX_APPS,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	},
@@ -46,6 +47,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_MDM_MUX,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	},
@@ -54,6 +56,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_MDM2_MUX,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	},
@@ -62,6 +65,7 @@
 		.ctx = 0,
 		.mempool = POOL_TYPE_QSC_MUX,
 		.num_tbl_entries = 0,
+		.md_info_inited = 0,
 		.tbl = NULL,
 		.ops = NULL,
 	}
@@ -85,6 +89,8 @@
 
 	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
 		ch = &diag_md[i];
+		if (!ch->md_info_inited)
+			continue;
 		if (ch->ops && ch->ops->open)
 			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
 	}
@@ -99,6 +105,8 @@
 
 	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
 		ch = &diag_md[i];
+		if (!ch->md_info_inited)
+			continue;
 
 		if (ch->ops && ch->ops->close)
 			ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
@@ -155,7 +163,7 @@
 	mutex_unlock(&driver->md_session_lock);
 
 	ch = &diag_md[id];
-	if (!ch)
+	if (!ch || !ch->md_info_inited)
 		return -EINVAL;
 
 	spin_lock_irqsave(&ch->lock, flags);
@@ -232,6 +240,8 @@
 
 	for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
 		ch = &diag_md[i];
+		if (!ch->md_info_inited)
+			continue;
 		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
 			entry = &ch->tbl[j];
 			if (entry->len <= 0 || entry->buf == NULL)
@@ -352,6 +362,8 @@
 		return -EINVAL;
 
 	ch = &diag_md[id];
+	if (!ch || !ch->md_info_inited)
+		return -EINVAL;
 
 	spin_lock_irqsave(&ch->lock, flags);
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
@@ -399,6 +411,7 @@
 			ch->tbl[j].ctx = 0;
 		}
 		spin_lock_init(&(ch->lock));
+		ch->md_info_inited = 1;
 	}
 
 	return 0;
@@ -427,6 +440,7 @@
 			ch->tbl[j].ctx = 0;
 		}
 		spin_lock_init(&(ch->lock));
+		ch->md_info_inited = 1;
 	}
 
 	return 0;
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
index 9b4aa39..4d65ded 100644
--- a/drivers/char/diag/diag_memorydevice.h
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -38,6 +38,7 @@
 	int ctx;
 	int mempool;
 	int num_tbl_entries;
+	int md_info_inited;
 	spinlock_t lock;
 	struct diag_buf_tbl_t *tbl;
 	struct diag_mux_ops *ops;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 54a4d98..a169230 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -970,6 +970,8 @@
 	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
 			poolsize_qsc_usb);
 	diag_md_mdm_init();
+	if (diag_dci_init_remote())
+		return -ENOMEM;
 	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
 	if (!driver->hdlc_encode_buf)
 		return -ENOMEM;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 6247503..f1bc1c5 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -48,6 +48,7 @@
 #define STM_RSP_SUPPORTED_INDEX		7
 #define STM_RSP_STATUS_INDEX		8
 #define STM_RSP_NUM_BYTES		9
+#define RETRY_MAX_COUNT		1000
 
 struct diag_md_hdlc_reset_work {
 	int pid;
@@ -277,28 +278,22 @@
 	 * if its supporting qshrink4 feature.
 	 */
 	if (info && info->peripheral_mask) {
-		if (info->peripheral_mask == DIAG_CON_ALL ||
-			(info->peripheral_mask & (1 << APPS_DATA)) ||
-			(info->peripheral_mask & (1 << PERIPHERAL_MODEM))) {
-			rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
-		} else {
-			for (i = 0; i < NUM_MD_SESSIONS; i++) {
-				if (info->peripheral_mask & (1 << i))
-					break;
-			}
-		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (info->peripheral_mask & (1 << i))
+				break;
 		}
+		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
 	} else
 		rsp_ctxt = driver->rsp_buf_ctxt;
 	mutex_unlock(&driver->md_session_lock);
 
 	/*
 	 * Keep trying till we get the buffer back. It should probably
-	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * take one or two iterations. When this loops till RETRY_MAX_COUNT, it
 	 * means we did not get a write complete for the previous
 	 * response.
 	 */
-	while (retry_count < UINT_MAX) {
+	while (retry_count < RETRY_MAX_COUNT) {
 		if (!driver->rsp_buf_busy)
 			break;
 		/*
@@ -376,27 +371,21 @@
 	 * if its supporting qshrink4 feature.
 	 */
 	if (info && info->peripheral_mask) {
-		if (info->peripheral_mask == DIAG_CON_ALL ||
-			(info->peripheral_mask & (1 << APPS_DATA)) ||
-			(info->peripheral_mask & (1 << PERIPHERAL_MODEM))) {
-			rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
-		} else {
-			for (i = 0; i < NUM_MD_SESSIONS; i++) {
-				if (info->peripheral_mask & (1 << i))
-					break;
-			}
-		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			if (info->peripheral_mask & (1 << i))
+				break;
 		}
+		rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD);
 	} else
 		rsp_ctxt = driver->rsp_buf_ctxt;
 	mutex_unlock(&driver->md_session_lock);
 	/*
 	 * Keep trying till we get the buffer back. It should probably
-	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * take one or two iterations. When this loops till RETRY_MAX_COUNT, it
 	 * means we did not get a write complete for the previous
 	 * response.
 	 */
-	while (retry_count < UINT_MAX) {
+	while (retry_count < RETRY_MAX_COUNT) {
 		if (!driver->rsp_buf_busy)
 			break;
 		/*
@@ -1843,14 +1832,18 @@
 		}
 		break;
 	case TYPE_CMD:
-		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+			num != TYPE_CMD) {
 			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			"Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
-				peripheral, type, num);
+			peripheral, type, num);
 			diagfwd_write_done(peripheral, type, num);
-		}
-		if (peripheral == APPS_DATA ||
-				ctxt == DIAG_MEMORY_DEVICE_MODE) {
+		} else if (peripheral == APPS_DATA ||
+			(peripheral >= 0 && peripheral < NUM_PERIPHERALS &&
+			num == TYPE_CMD)) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking APPS response buffer free after write done for p: %d, t: %d, buf_num: %d\n",
+			peripheral, type, num);
 			spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
 			driver->rsp_buf_busy = 0;
 			driver->encoded_rsp_len = 0;
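
The diagfwd.c change caps the busy-wait at RETRY_MAX_COUNT instead of letting it run towards UINT_MAX, so a lost write-complete can no longer stall the response path indefinitely. A rough sketch of the bounded-retry shape; the cap comes from the patch, while the flag, sleep interval and return codes are invented here:

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define RETRY_MAX_COUNT		1000

static int example_wait_for_buffer(int *busy)
{
	unsigned int retry_count = 0;

	while (retry_count < RETRY_MAX_COUNT) {
		if (!READ_ONCE(*busy))
			return 0;		/* buffer came back */
		usleep_range(10000, 10100);	/* interval invented for the sketch */
		retry_count++;
	}
	return -EBUSY;				/* give up instead of spinning forever */
}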
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 8d47ee38..6f81bfd 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,8 +45,11 @@
 
 void diag_cntl_channel_open(struct diagfwd_info *p_info)
 {
-	if (!p_info)
+	if (!p_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid fwd_info structure\n");
 		return;
+	}
 	driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
 	queue_work(driver->cntl_wq, &driver->mask_update_work);
 	diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
@@ -56,12 +59,18 @@
 {
 	uint8_t peripheral;
 
-	if (!p_info)
+	if (!p_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid fwd_info structure\n");
 		return;
+	}
 
 	peripheral = p_info->peripheral;
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	driver->feature[peripheral].sent_feature_mask = 0;
 	driver->feature[peripheral].rcvd_feature_mask = 0;
@@ -87,8 +96,11 @@
 	driver->stm_peripheral = 0;
 	mutex_unlock(&driver->cntl_lock);
 
-	if (peripheral_mask == 0)
+	if (peripheral_mask == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Empty Peripheral mask\n");
 		return;
+	}
 
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
 		if (!driver->feature[i].stm_support)
@@ -111,11 +123,18 @@
 	struct pid *pid_struct;
 	struct task_struct *result;
 
-	if (peripheral > NUM_PERIPHERALS)
+	if (peripheral > NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
-	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: Invalid logging_mode (%d)\n",
+			driver->logging_mode);
 		return;
+	}
 
 	mutex_lock(&driver->md_session_lock);
 	memset(&info, 0, sizeof(struct siginfo));
@@ -171,8 +190,12 @@
 	uint32_t pd;
 	int status = DIAG_STATUS_CLOSED;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg)) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pd_msg_len = %d\n",
+		!buf, peripheral, len, (int)sizeof(*pd_msg));
 		return;
+	}
 
 	pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
 	pd = pd_msg->pd_id;
@@ -182,8 +205,11 @@
 
 static void enable_stm_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	mutex_lock(&driver->cntl_lock);
 	driver->feature[peripheral].stm_support = ENABLE_STM;
@@ -195,8 +221,11 @@
 
 static void enable_socket_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	if (driver->supports_sockets)
 		driver->feature[peripheral].sockets_enabled = 1;
@@ -206,8 +235,11 @@
 
 static void process_hdlc_encoding_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	if (driver->supports_apps_hdlc_encoding) {
 		driver->feature[peripheral].encode_hdlc =
@@ -220,8 +252,11 @@
 
 static void process_upd_header_untagging_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	if (driver->supports_apps_header_untagging) {
 		driver->feature[peripheral].untag_header =
@@ -247,8 +282,16 @@
 	 * Perform Basic sanity. The len field is the size of the data payload.
 	 * This doesn't include the header size.
 	 */
-	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:peripheral(%d) command deregistration packet processing started\n",
+		peripheral);
 
 	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
 	ptr += header_len;
@@ -256,8 +299,8 @@
 	read_len += header_len - (2 * sizeof(uint32_t));
 
 	if (dereg->count_entries == 0) {
-		pr_debug("diag: In %s, received reg tbl with no entries\n",
-			 __func__);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: received reg tbl with no entries\n");
 		return;
 	}
 
@@ -276,6 +319,9 @@
 		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
 		       __func__, read_len, len, dereg->count_entries);
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:peripheral(%d) command deregistration packet processing complete\n",
+		peripheral);
 }
 static void process_command_registration(uint8_t *buf, uint32_t len,
 					 uint8_t peripheral)
@@ -292,8 +338,15 @@
 	 * Perform Basic sanity. The len field is the size of the data payload.
 	 * This doesn't include the header size.
 	 */
-	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: peripheral(%d) command registration packet processing started\n",
+		peripheral);
 
 	reg = (struct diag_ctrl_cmd_reg *)ptr;
 	ptr += header_len;
@@ -301,7 +354,8 @@
 	read_len += header_len - (2 * sizeof(uint32_t));
 
 	if (reg->count_entries == 0) {
-		pr_debug("diag: In %s, received reg tbl with no entries\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: In %s, received reg tbl with no entries\n",
 			 __func__);
 		return;
 	}
@@ -321,6 +375,9 @@
 		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
 		       __func__, read_len, len, reg->count_entries);
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: peripheral(%d) command registration packet processing complete\n",
+		peripheral);
 }
 
 static void diag_close_transport_work_fn(struct work_struct *work)
@@ -347,8 +404,11 @@
 
 static void process_socket_feature(uint8_t peripheral)
 {
-	if (peripheral >= NUM_PERIPHERALS)
+	if (peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid peripheral (%d)\n", peripheral);
 		return;
+	}
 
 	mutex_lock(&driver->cntl_lock);
 	driver->close_transport |= PERIPHERAL_MASK(peripheral);
@@ -379,15 +439,20 @@
 	uint32_t feature_mask = 0;
 	uint8_t *ptr = buf;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
 
 	header = (struct diag_ctrl_feature_mask *)ptr;
 	ptr += header_len;
 	feature_mask_len = header->feature_mask_len;
 
 	if (feature_mask_len == 0) {
-		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: In %s, received invalid feature mask from peripheral %d\n",
 			 __func__, peripheral);
 		return;
 	}
@@ -400,6 +465,8 @@
 	diag_cmd_remove_reg_by_proc(peripheral);
 
 	driver->feature[peripheral].rcvd_feature_mask = 1;
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+	"diag: Received feature mask for peripheral %d\n", peripheral);
 
 	for (i = 0; i < feature_mask_len && read_len < len; i++) {
 		feature_mask = *(uint8_t *)ptr;
@@ -431,6 +498,10 @@
 
 	process_socket_feature(peripheral);
 	process_log_on_demand_feature(peripheral);
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: Peripheral(%d) feature mask is processed\n",
+		peripheral);
 }
 
 static void process_last_event_report(uint8_t *buf, uint32_t len,
@@ -442,14 +513,23 @@
 	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
 	uint16_t event_size = 0;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pkt_len = %d\n",
+		!buf, peripheral, len, pkt_len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:started processing last event report for peripheral (%d)\n",
+		peripheral);
 
 	mutex_lock(&event_mask.lock);
 	header = (struct diag_ctrl_last_event_report *)ptr;
 	event_size = ((header->event_last_id / 8) + 1);
 	if (event_size >= driver->event_mask_size) {
-		pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n",
+		DIAG_LOG(DIAG_DEBUG_CONTROL,
+			"diag: In %s, receiving event mask size more that Apps can handle\n",
 			 __func__);
 		temp = krealloc(driver->event_mask->ptr, event_size,
 				GFP_KERNEL);
@@ -467,6 +547,9 @@
 		driver->last_event_id = header->event_last_id;
 err:
 	mutex_unlock(&event_mask.lock);
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: last event report processed for peripheral (%d)\n",
+		peripheral);
 }
 
 static void process_log_range_report(uint8_t *buf, uint32_t len,
@@ -480,8 +563,15 @@
 	struct diag_ctrl_log_range *log_range = NULL;
 	struct diag_log_mask_t *mask_ptr = NULL;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+		!buf, peripheral, len);
 		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag:started processing log range report for peripheral(%d)\n",
+		peripheral);
 
 	header = (struct diag_ctrl_log_range_report *)ptr;
 	ptr += header_len;
@@ -507,6 +597,9 @@
 		mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
 		mutex_unlock(&(mask_ptr->lock));
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: log range report processed for peripheral (%d)\n",
+		peripheral);
 }
 
 static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
@@ -514,8 +607,12 @@
 {
 	uint32_t temp_range;
 
-	if (!mask || !range)
+	if (!mask || !range) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid %s\n",
+		(!mask ? "mask" : (!range ? "range" : " ")));
 		return -EIO;
+	}
 	if (range->ssid_last < range->ssid_first) {
 		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
 		       __func__, range->ssid_first, range->ssid_last);
@@ -547,8 +644,16 @@
 	uint8_t *temp = NULL;
 	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, min_len = %d\n",
+		!buf, peripheral, len, min_len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: started processing ssid range for peripheral (%d)\n",
+		peripheral);
 
 	header = (struct diag_ctrl_ssid_range_report *)ptr;
 	ptr += header_len;
@@ -600,6 +705,9 @@
 		driver->msg_mask_tbl_count += 1;
 	}
 	mutex_unlock(&driver->msg_mask_lock);
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: processed ssid range for peripheral(%d)\n",
+		peripheral);
 }
 
 static void diag_build_time_mask_update(uint8_t *buf,
@@ -616,8 +724,12 @@
 	uint32_t *dest_ptr = NULL;
 	struct diag_msg_mask_t *build_mask = NULL;
 
-	if (!range || !buf)
+	if (!range || !buf) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid %s\n",
+		(!range ? "range" : (!buf ? "buf" : " ")));
 		return;
+	}
 
 	if (range->ssid_last < range->ssid_first) {
 		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
@@ -679,8 +791,16 @@
 	struct diag_ctrl_build_mask_report *header = NULL;
 	struct diag_ssid_range_t *range = NULL;
 
-	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, header_len = %d\n",
+		!buf, peripheral, len, header_len);
 		return;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: started processing build mask for peripheral(%d)\n",
+		peripheral);
 
 	header = (struct diag_ctrl_build_mask_report *)ptr;
 	ptr += header_len;
@@ -696,6 +816,8 @@
 		ptr += num_items * sizeof(uint32_t);
 		read_len += num_items * sizeof(uint32_t);
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+		"diag: processing build mask complete (%d)\n", peripheral);
 }
 
 int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name,
@@ -703,8 +825,12 @@
 {
 	struct diag_id_tbl_t *new_item = NULL;
 
-	if (!process_name || diag_id == 0)
+	if (!process_name || diag_id == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters: !process_name = %d, diag_id = %d\n",
+		!process_name, diag_id);
 		return -EINVAL;
+	}
 
 	new_item = kzalloc(sizeof(struct diag_id_tbl_t), GFP_KERNEL);
 	if (!new_item)
@@ -734,8 +860,10 @@
 	struct list_head *temp;
 	struct diag_id_tbl_t *item = NULL;
 
-	if (!process_name || !diag_id)
+	if (!process_name || !diag_id) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
 		return -EINVAL;
+	}
 
 	mutex_lock(&driver->diag_id_mutex);
 	list_for_each_safe(start, temp, &driver->diag_id_list) {
@@ -762,8 +890,12 @@
 	uint8_t local_diag_id = 0;
 	uint8_t new_request = 0, i = 0, ch_type = 0;
 
-	if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
+	if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Invalid parameters: !buf = %d, len = %d, peripheral = %d\n",
+		!buf, len, peripheral);
 		return;
+	}
 
 	header = (struct diag_ctrl_diagid *)buf;
 	process_name = (char *)&header->process_name;
@@ -841,7 +973,7 @@
 		fwd_info = &peripheral_info[TYPE_DATA][peripheral];
 		diagfwd_buffers_init(fwd_info);
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
-		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
+		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s\n",
 			driver->diag_id_sent[peripheral], peripheral,
 			ctrl_pkt.diag_id, process_name);
 	}
@@ -855,8 +987,10 @@
 	uint8_t *ptr = buf;
 	struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
 
-	if (!buf || len <= 0 || !p_info)
+	if (!buf || len <= 0 || !p_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
 		return;
+	}
 
 	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
 		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
@@ -866,6 +1000,9 @@
 
 	while (read_len + header_len < len) {
 		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+		DIAG_LOG(DIAG_DEBUG_CONTROL,
+			"diag:peripheral: %d: pkt_id: %d\n",
+			p_info->peripheral, ctrl_pkt->pkt_id);
 		switch (ctrl_pkt->pkt_id) {
 		case DIAG_CTRL_MSG_REG:
 			process_command_registration(ptr, ctrl_pkt->len,
@@ -904,12 +1041,15 @@
 						   p_info->peripheral);
 			break;
 		default:
-			pr_debug("diag: Control packet %d not supported\n",
-				 ctrl_pkt->pkt_id);
+			DIAG_LOG(DIAG_DEBUG_CONTROL,
+			"diag: Control packet %d not supported\n",
+			 ctrl_pkt->pkt_id);
 		}
 		ptr += header_len + ctrl_pkt->len;
 		read_len += header_len + ctrl_pkt->len;
 	}
+	DIAG_LOG(DIAG_DEBUG_CONTROL,
+	"diag: control packet processing complete\n");
 }
 
 static int diag_compute_real_time(int idx)
@@ -1127,15 +1267,16 @@
 	for (i = 0; i < DIAG_NUM_PROC; i++) {
 		temp_real_time = diag_compute_real_time(i);
 		if (temp_real_time == driver->real_time_mode[i]) {
-			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: did not update real time mode on proc %d, already in the req mode %d\n",
 				i, temp_real_time);
 			continue;
 		}
 
 		if (i == DIAG_LOCAL_PROC) {
 			if (!send_update) {
-				pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
-					 __func__);
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: cannot send real time mode pkt since one of the periperhal is in buffering mode\n");
 				break;
 			}
 			for (j = 0; j < NUM_PERIPHERALS; j++)
@@ -1169,7 +1310,8 @@
 			temp_real_time = MODE_NONREALTIME;
 		}
 		if (temp_real_time == driver->real_time_mode[i]) {
-			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: did not update real time mode on proc %d, already in the req mode %d\n",
 				i, temp_real_time);
 			continue;
 		}
@@ -1204,8 +1346,8 @@
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: control channel is not open, p: %d\n", peripheral);
 		return err;
 	}
 
@@ -1317,8 +1459,9 @@
 	}
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral %d doesn't support buffering\n",
+			 peripheral);
 		driver->buffering_flag[params->peripheral] = 0;
 		return -EIO;
 	}
@@ -1383,8 +1526,9 @@
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: control channel is not open, p: %d\n",
+			 peripheral);
 		return -ENODEV;
 	}
 
@@ -1413,15 +1557,17 @@
 	struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral  %d doesn't support buffering\n",
+			 peripheral);
 		return -EINVAL;
 	}
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: control channel is not open, p: %d\n",
+			 peripheral);
 		return -ENODEV;
 	}
 
@@ -1478,8 +1624,9 @@
 	}
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral  %d doesn't support buffering\n",
+			 peripheral);
 		return -EINVAL;
 	}
 
@@ -1557,15 +1704,17 @@
 	}
 
 	if (!driver->feature[peripheral].peripheral_buffering) {
-		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: peripheral  %d doesn't support buffering\n",
+			 peripheral);
 		return -EINVAL;
 	}
 
 	if (!driver->diagfwd_cntl[peripheral] ||
 	    !driver->diagfwd_cntl[peripheral]->ch_open) {
-		pr_debug("diag: In %s, control channel is not open, p: %d\n",
-			 __func__, peripheral);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: control channel is not open, p: %d\n",
+			 peripheral);
 		return -ENODEV;
 	}
 
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 7225dc2..2022e7b 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -723,6 +723,7 @@
 				   unsigned char *buf, int len)
 {
 	if (!fwd_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
 		diag_ws_release();
 		return;
 	}
@@ -743,8 +744,12 @@
 	 */
 	diag_ws_on_copy_fail(DIAG_WS_MUX);
 	/* Reset the buffer in_busy value after processing the data */
-	if (fwd_info->buf_1)
+	if (fwd_info->buf_1) {
 		atomic_set(&fwd_info->buf_1->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
 
 	diagfwd_queue_read(fwd_info);
 	diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
@@ -769,8 +774,12 @@
 
 	diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
 	/* Reset the buffer in_busy value after processing the data */
-	if (fwd_info->buf_1)
+	if (fwd_info->buf_1) {
 		atomic_set(&fwd_info->buf_1->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_DCI,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
 
 	diagfwd_queue_read(fwd_info);
 }
@@ -1638,13 +1647,15 @@
 	struct diagfwd_buf_t *temp_buf = NULL;
 
 	if (!fwd_info) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
 		diag_ws_release();
 		return;
 	}
 
 	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
-		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
-			 __func__, fwd_info->peripheral, fwd_info->type,
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: p: %d, t: %d, inited: %d, opened: %d, ch_open: %d\n",
+			 fwd_info->peripheral, fwd_info->type,
 			 fwd_info->inited, atomic_read(&fwd_info->opened),
 			 fwd_info->ch_open);
 		diag_ws_release();
@@ -1680,8 +1691,9 @@
 			atomic_set(&temp_buf->in_busy, 1);
 		}
 	} else {
-		pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
-			 __func__, fwd_info->peripheral, fwd_info->type);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: both buffers are busy for p: %d, t: %d\n",
+			 fwd_info->peripheral, fwd_info->type);
 	}
 
 	if (!read_buf) {
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 63d84e6..83c6959 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -21,6 +21,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 #define RNG_CR 0x00
@@ -46,6 +47,7 @@
 	struct hwrng rng;
 	void __iomem *base;
 	struct clk *clk;
+	struct reset_control *rst;
 };
 
 static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -140,6 +142,13 @@
 	if (IS_ERR(priv->clk))
 		return PTR_ERR(priv->clk);
 
+	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+	if (!IS_ERR(priv->rst)) {
+		reset_control_assert(priv->rst);
+		udelay(2);
+		reset_control_deassert(priv->rst);
+	}
+
 	dev_set_drvdata(dev, priv);
 
 	priv->rng.name = dev_driver_string(dev),
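
The stm32-rng probe addition is the common pattern for an optional reset line: ask for a reset control and, only if the device tree actually describes one, pulse it before using the block. A hedged sketch of that pattern in an arbitrary probe (device specifics omitted); newer kernels would likely spell this with the *_optional_* getters, but the plain getter is what this tree uses:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(rst)) {		/* reset line is optional */
		reset_control_assert(rst);
		udelay(2);		/* give the reset time to propagate */
		reset_control_deassert(rst);
	}

	return 0;
}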
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 6e658aa..a70518a 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -251,8 +251,9 @@
 		ipmi->irq = opal_event_request(prop);
 	}
 
-	if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
-				"opal-ipmi", ipmi)) {
+	rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+			 "opal-ipmi", ipmi);
+	if (rc) {
 		dev_warn(dev, "Unable to request irq\n");
 		goto err_dispose;
 	}
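
The ipmi_powernv fix keeps the request_irq() return value so the error path reports the real failure instead of whatever rc happened to hold. A minimal sketch of the corrected shape, with the handler and irq name invented:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(struct device *dev, int irq, void *data)
{
	int rc;

	rc = request_irq(irq, example_irq_handler, IRQF_TRIGGER_HIGH,
			 "example", data);
	if (rc) {
		dev_warn(dev, "Unable to request irq\n");
		return rc;	/* propagate the real error code */
	}
	return 0;
}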
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f11c1c7..1213191 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -761,7 +761,7 @@
 			ssif_info->ssif_state = SSIF_NORMAL;
 			ipmi_ssif_unlock_cond(ssif_info, flags);
 			pr_warn(PFX "Error getting flags: %d %d, %x\n",
-			       result, len, data[2]);
+			       result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
 			/*
@@ -783,7 +783,7 @@
 		if ((result < 0) || (len < 3) || (data[2] != 0)) {
 			/* Error clearing flags */
 			pr_warn(PFX "Error clearing flags: %d %d, %x\n",
-			       result, len, data[2]);
+			       result, len, (len >= 3) ? data[2] : 0);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
 			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
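
Both ipmi_ssif warnings now read data[2] only when len says the byte exists; the same guard applies to any log line that prints from a variable-length response. A tiny illustrative helper (names invented):

#include <linux/printk.h>
#include <linux/types.h>

static void example_log_flags_error(int result, int len, const u8 *data)
{
	/* Index into the response only once we know it is long enough. */
	pr_warn("example: error getting flags: %d %d, %x\n",
		result, len, (len >= 3) ? data[2] : 0);
}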
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8a167a6..1b3c731 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,6 +259,7 @@
 #include <linux/kmemcheck.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
+#include <linux/ratelimit.h>
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
@@ -444,6 +445,16 @@
 				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
 
+static struct ratelimit_state unseeded_warning =
+	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -819,6 +830,39 @@
 	return 1;
 }
 
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+	int i;
+	struct crng_state *crng;
+	struct crng_state **pool;
+
+	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+	for_each_online_node(i) {
+		crng = kmalloc_node(sizeof(struct crng_state),
+				    GFP_KERNEL | __GFP_NOFAIL, i);
+		spin_lock_init(&crng->lock);
+		crng_initialize(crng);
+		pool[i] = crng;
+	}
+	mb();
+	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+		for_each_node(i)
+			kfree(pool[i]);
+		kfree(pool);
+	}
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+	schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 {
 	unsigned long	flags;
@@ -848,10 +892,23 @@
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		numa_crng_init();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
+		if (unseeded_warning.missed) {
+			pr_notice("random: %d get_random_xx warning(s) missed "
+				  "due to ratelimiting\n",
+				  unseeded_warning.missed);
+			unseeded_warning.missed = 0;
+		}
+		if (urandom_warning.missed) {
+			pr_notice("random: %d urandom warning(s) missed "
+				  "due to ratelimiting\n",
+				  urandom_warning.missed);
+			urandom_warning.missed = 0;
+		}
 	}
 	spin_unlock_irqrestore(&crng->lock, flags);
 }
@@ -1661,29 +1718,14 @@
  */
 static int rand_initialize(void)
 {
-#ifdef CONFIG_NUMA
-	int i;
-	struct crng_state *crng;
-	struct crng_state **pool;
-#endif
-
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
 	crng_global_init_time = jiffies;
-
-#ifdef CONFIG_NUMA
-	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
-	for_each_online_node(i) {
-		crng = kmalloc_node(sizeof(struct crng_state),
-				    GFP_KERNEL | __GFP_NOFAIL, i);
-		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
-		pool[i] = crng;
+	if (ratelimit_disable) {
+		urandom_warning.interval = 0;
+		unseeded_warning.interval = 0;
 	}
-	mb();
-	crng_node_pool = pool;
-#endif
 	return 0;
 }
 early_initcall(rand_initialize);
@@ -1751,9 +1793,10 @@
 
 	if (!crng_ready() && maxwarn > 0) {
 		maxwarn--;
-		printk(KERN_NOTICE "random: %s: uninitialized urandom read "
-		       "(%zd bytes read)\n",
-		       current->comm, nbytes);
+		if (__ratelimit(&urandom_warning))
+			printk(KERN_NOTICE "random: %s: uninitialized "
+			       "urandom read (%zd bytes read)\n",
+			       current->comm, nbytes);
 		spin_lock_irqsave(&primary_crng.lock, flags);
 		crng_init_cnt = 0;
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
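
The random.c change above rate-limits the "uninitialized urandom read" warning with __ratelimit() and, once the CRNG is seeded, reports how many messages were suppressed. The following standalone C sketch shows the same interval/burst/missed bookkeeping in userspace; the struct fields and the rl_allow() helper are invented for illustration and are not the kernel's ratelimit_state API.

/* Illustrative userspace analogue of a burst-per-interval ratelimit
 * with a "missed" counter (interval = 1 s, burst = 3). */
#include <stdio.h>
#include <time.h>

struct rl_state {
	time_t window_start;	/* start of the current interval */
	int interval;		/* interval length in seconds */
	int burst;		/* messages allowed per interval */
	int printed;		/* messages emitted in this interval */
	int missed;		/* messages suppressed so far */
};

static int rl_allow(struct rl_state *rs)
{
	time_t now = time(NULL);

	if (now - rs->window_start >= rs->interval) {
		/* New interval: report and reset the counters. */
		if (rs->missed)
			fprintf(stderr, "%d message(s) suppressed\n",
				rs->missed);
		rs->window_start = now;
		rs->printed = 0;
		rs->missed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;	/* caller may print */
	}
	rs->missed++;
	return 0;		/* suppress */
}

int main(void)
{
	struct rl_state rs = { .window_start = time(NULL),
			       .interval = 1, .burst = 3 };

	for (int i = 0; i < 10; i++)
		if (rl_allow(&rs))
			printf("warning %d: uninitialized read\n", i);
	return 0;
}
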
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8f890c1..8c0017d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1405,7 +1405,6 @@
 {
 	char debugfs_name[16];
 	struct port *port;
-	struct port_buffer *buf;
 	dev_t devt;
 	unsigned int nr_added_bufs;
 	int err;
@@ -1516,8 +1515,6 @@
 	return 0;
 
 free_inbufs:
-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf, true);
 free_device:
 	device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
@@ -1542,34 +1539,14 @@
 
 static void remove_port_data(struct port *port)
 {
-	struct port_buffer *buf;
-
 	spin_lock_irq(&port->inbuf_lock);
 	/* Remove unused data this port might have received. */
 	discard_port_data(port);
 	spin_unlock_irq(&port->inbuf_lock);
 
-	/* Remove buffers we queued up for the Host to send us data in. */
-	do {
-		spin_lock_irq(&port->inbuf_lock);
-		buf = virtqueue_detach_unused_buf(port->in_vq);
-		spin_unlock_irq(&port->inbuf_lock);
-		if (buf)
-			free_buf(buf, true);
-	} while (buf);
-
 	spin_lock_irq(&port->outvq_lock);
 	reclaim_consumed_buffers(port);
 	spin_unlock_irq(&port->outvq_lock);
-
-	/* Free pending buffers from the out-queue. */
-	do {
-		spin_lock_irq(&port->outvq_lock);
-		buf = virtqueue_detach_unused_buf(port->out_vq);
-		spin_unlock_irq(&port->outvq_lock);
-		if (buf)
-			free_buf(buf, true);
-	} while (buf);
 }
 
 /*
@@ -1794,13 +1771,24 @@
 	spin_unlock(&portdev->c_ivq_lock);
 }
 
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	while ((buf = virtqueue_get_buf(vq, &len)))
+		free_buf(buf, can_sleep);
+}
+
 static void out_intr(struct virtqueue *vq)
 {
 	struct port *port;
 
 	port = find_port_by_vq(vq->vdev->priv, vq);
-	if (!port)
+	if (!port) {
+		flush_bufs(vq, false);
 		return;
+	}
 
 	wake_up_interruptible(&port->waitqueue);
 }
@@ -1811,8 +1799,10 @@
 	unsigned long flags;
 
 	port = find_port_by_vq(vq->vdev->priv, vq);
-	if (!port)
+	if (!port) {
+		flush_bufs(vq, false);
 		return;
+	}
 
 	spin_lock_irqsave(&port->inbuf_lock, flags);
 	port->inbuf = get_inbuf(port);
@@ -1987,6 +1977,15 @@
 
 static void remove_vqs(struct ports_device *portdev)
 {
+	struct virtqueue *vq;
+
+	virtio_device_for_each_vq(portdev->vdev, vq) {
+		struct port_buffer *buf;
+
+		flush_bufs(vq, true);
+		while ((buf = virtqueue_detach_unused_buf(vq)))
+			free_buf(buf, true);
+	}
 	portdev->vdev->config->del_vqs(portdev->vdev);
 	kfree(portdev->in_vqs);
 	kfree(portdev->out_vqs);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f41307d..0426ff7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2280,6 +2280,9 @@
 	int ret;
 
 	clk_prepare_lock();
+	/* Always try to update cached phase if possible */
+	if (core->ops->get_phase)
+		core->phase = core->ops->get_phase(core->hw);
 	ret = core->phase;
 	clk_prepare_unlock();
 
diff --git a/drivers/clk/msm/clock-cpu-8939.c b/drivers/clk/msm/clock-cpu-8939.c
index 5ba6bd0..5b8b8f3 100644
--- a/drivers/clk/msm/clock-cpu-8939.c
+++ b/drivers/clk/msm/clock-cpu-8939.c
@@ -999,7 +999,7 @@
 
 	return rc;
 }
-late_initcall(clock_cpu_lpm_get_latency);
+late_initcall_sync(clock_cpu_lpm_get_latency);
 
 #define APCS_C0_PLL			0xb116000
 #define C0_PLL_MODE			0x0
diff --git a/drivers/clk/msm/clock-gcc-8952.c b/drivers/clk/msm/clock-gcc-8952.c
index 6d7727f..d471138 100644
--- a/drivers/clk/msm/clock-gcc-8952.c
+++ b/drivers/clk/msm/clock-gcc-8952.c
@@ -216,6 +216,7 @@
 	.config_reg = (void __iomem *)APCS_C0_PLL_USER_CTL,
 	.status_reg = (void __iomem *)APCS_C0_PLL_STATUS,
 	.freq_tbl = apcs_c0_pll_freq,
+	.config_ctl_reg = (void __iomem *)APCS_C0_PLL_CONFIG_CTL,
 	.masks = {
 		.vco_mask = BM(29, 28),
 		.pre_div_mask = BIT(12),
@@ -283,6 +284,7 @@
 	.config_reg = (void __iomem *)APCS_C1_PLL_USER_CTL,
 	.status_reg = (void __iomem *)APCS_C1_PLL_STATUS,
 	.freq_tbl = apcs_c1_pll_freq,
+	.config_ctl_reg = (void __iomem *)APCS_C1_PLL_CONFIG_CTL,
 	.masks = {
 		.vco_mask = BM(29, 28),
 		.pre_div_mask = BIT(12),
@@ -4407,6 +4409,11 @@
 	if (compat_bin2 || compat_bin4 || compat_bin5)
 		nbases = APCS_C0_PLL_BASE;
 
+	if (compat_bin5 || compat_bin6) {
+		a53ss_c0_pll.c.ops = &clk_ops_acpu_pll;
+		a53ss_c1_pll.c.ops = &clk_ops_acpu_pll;
+	}
+
 	ret = get_mmio_addr(pdev, nbases);
 	if (ret)
 		return ret;
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
index 26c04e5..381c8db 100644
--- a/drivers/clk/msm/clock-pll.c
+++ b/drivers/clk/msm/clock-pll.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -217,13 +217,46 @@
 	writel_relaxed(regval, pll_config);
 }
 
+static void pll_wait_for_lock(struct pll_clk *pll)
+{
+	int count;
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 status_reg, user_reg, l_reg, m_reg, n_reg, config_reg;
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)) {
+		mode = readl_relaxed(PLL_MODE_REG(pll));
+		status_reg = readl_relaxed(PLL_STATUS_REG(pll));
+		user_reg = readl_relaxed(PLL_CONFIG_REG(pll));
+		config_reg = readl_relaxed(PLL_CFG_CTL_REG(pll));
+		l_reg = readl_relaxed(PLL_L_REG(pll));
+		m_reg = readl_relaxed(PLL_M_REG(pll));
+		n_reg = readl_relaxed(PLL_N_REG(pll));
+		pr_err("count = %d\n", (int)count);
+		pr_err("mode register is 0x%x\n", mode);
+		pr_err("status register is 0x%x\n", status_reg);
+		pr_err("user control register is 0x%x\n", user_reg);
+		pr_err("config control register is 0x%x\n", config_reg);
+		pr_err("L value register is 0x%x\n", l_reg);
+		pr_err("M value register is 0x%x\n", m_reg);
+		pr_err("N value control register is 0x%x\n", n_reg);
+		panic("PLL %s didn't lock after enabling it!\n",
+				pll->c.dbg_name);
+	}
+}
+
 static int sr2_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
 	struct pll_clk *pll = to_pll_clk(c);
-	int ret = 0, count;
 	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
-	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 
@@ -245,15 +278,7 @@
 	mode |= PLL_RESET_N;
 	writel_relaxed(mode, PLL_MODE_REG(pll));
 
-	/* Wait for pll to lock. */
-	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
-		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
-			break;
-		udelay(1);
-	}
-
-	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
-		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+	pll_wait_for_lock(pll);
 
 	/* Enable PLL output. */
 	mode |= PLL_OUTCTRL;
@@ -263,7 +288,50 @@
 	mb();
 
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
-	return ret;
+	return 0;
+}
+
+static int acpu_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* PLL H/W requires a 50uSec delay before polling lock_detect. */
+	mb();
+	udelay(50);
+
+	pll_wait_for_lock(pll);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
 }
 
 void __variable_rate_pll_init(struct clk *c)
@@ -886,6 +954,15 @@
 	.list_registers = local_pll_clk_list_registers,
 };
 
+const struct clk_ops clk_ops_acpu_pll = {
+	.enable = acpu_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
 const struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
 	.enable = variable_rate_pll_clk_enable_hwfsm,
 	.disable = variable_rate_pll_clk_disable_hwfsm,
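
pll_wait_for_lock() above polls the PLL status register for a bounded number of iterations, dumps the relevant registers, and panics if the lock bit never appears. Below is a small userspace sketch of the same bounded-poll-then-report pattern; read_status() merely simulates the readl_relaxed() of the status register.

/* Bounded polling for a status bit, analogous to pll_wait_for_lock(). */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define LOCK_BIT	(1u << 16)
#define MAX_LOOPS	500

static unsigned int fake_status;	/* pretend hardware register */

static unsigned int read_status(void)
{
	/* Simulate the PLL locking after a few polls. */
	static int polls;

	if (++polls > 5)
		fake_status |= LOCK_BIT;
	return fake_status;
}

static void wait_for_lock(void)
{
	int count;

	for (count = MAX_LOOPS; count > 0; count--) {
		if (read_status() & LOCK_BIT)
			return;
		usleep(1);	/* the driver uses udelay(1) */
	}

	/* Mirror of the driver's behaviour: dump state and give up. */
	fprintf(stderr, "status register is 0x%x\n", read_status());
	fprintf(stderr, "PLL didn't lock after enabling it!\n");
	exit(1);
}

int main(void)
{
	wait_for_lock();
	printf("locked\n");
	return 0;
}
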
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
index f2ed36c..0bdfeeb 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
@@ -165,7 +165,8 @@
 	return sel;
 }
 
-static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll)
+static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll,
+	bool is_handoff)
 {
 	u32 status;
 	bool pll_locked;
@@ -177,8 +178,9 @@
 			((status & BIT(1)) > 0),
 			DSI_PLL_POLL_MAX_READS,
 			DSI_PLL_POLL_TIMEOUT_US)) {
-		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
-			pll->index, status);
+		if (!is_handoff)
+			pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+				pll->index, status);
 		pll_locked = false;
 	} else {
 		pll_locked = true;
@@ -213,7 +215,7 @@
 	wmb(); /* make sure register committed before enabling branch clocks */
 	udelay(50); /* h/w recommended delay */
 
-	if (!pll_is_pll_locked_12nm(pll)) {
+	if (!pll_is_pll_locked_12nm(pll, false)) {
 		pr_err("DSI PLL ndx=%d lock failed!\n",
 			pll->index);
 		rc = -EINVAL;
@@ -261,7 +263,7 @@
 	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_POWERUP_CTRL, data);
 	ndelay(500); /* h/w recommended delay */
 
-	if (!pll_is_pll_locked_12nm(pll)) {
+	if (!pll_is_pll_locked_12nm(pll, false)) {
 		pr_err("DSI PLL ndx=%d lock failed!\n",
 			pll->index);
 		rc = -EINVAL;
@@ -556,6 +558,142 @@
 	param->gmp_cntrl = 0x1;
 }
 
+static u32 __mdss_dsi_get_multi_intX100(u64 vco_rate, u32 *rem)
+{
+	u32 remainder = 0;
+	u64 temp = 0;
+	const u32 ref_clk_rate = 19200000, quarterX100 = 25;
+
+	temp = div_u64_rem(vco_rate, ref_clk_rate, &remainder);
+	temp *= 100;
+
+	/*
+	 * The multiplication integer needs to be floored in steps of 0.25;
+	 * hence multi_intX100 needs to be rounded off in steps of 25.
+	 */
+	if (remainder < (ref_clk_rate / 4)) {
+		*rem = remainder;
+		return temp;
+	} else if ((remainder >= (ref_clk_rate / 4)) &&
+		remainder < (ref_clk_rate / 2)) {
+		*rem = (remainder - (ref_clk_rate / 4));
+		return (temp + quarterX100);
+	} else if ((remainder >= (ref_clk_rate / 2)) &&
+		(remainder < ((3 * ref_clk_rate) / 4))) {
+		*rem = (remainder - (ref_clk_rate / 2));
+		return (temp + (quarterX100 * 2));
+	}
+
+	*rem = (remainder - ((3 * ref_clk_rate) / 4));
+	return (temp + (quarterX100 * 3));
+}
+
+static u32 __calc_gcd(u32 num1, u32 num2)
+{
+	if (num2 != 0)
+		return __calc_gcd(num2, (num1 % num2));
+	else
+		return num1;
+}
+
+static void mdss_dsi_pll_12nm_calc_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_param *param = &pdb->param;
+	u64 multi_intX100 = 0, temp = 0;
+	u32 temp_rem1 = 0, temp_rem2 = 0;
+	const u64 power_2_17 = 131072, power_2_10 = 1024;
+	const u32 ref_clk_rate = 19200000;
+
+	multi_intX100 = __mdss_dsi_get_multi_intX100(pll->vco_current_rate,
+		&temp_rem1);
+
+	/* Calculation for mpll_ssc_peak_i */
+	temp = (multi_intX100 * pll->ssc_ppm * power_2_17);
+	temp = div_u64(temp, 100); /* 100 div for multi_intX100 */
+	param->mpll_ssc_peak_i =
+		(u32) div_u64(temp, 1000000); /*10^6 for SSC PPM */
+
+	/* Calculation for mpll_stepsize_i */
+	param->mpll_stepsize_i = (u32) div_u64((param->mpll_ssc_peak_i *
+		pll->ssc_freq * power_2_10), ref_clk_rate);
+
+	/* Calculation for mpll_mint_i */
+	param->mpll_mint_i = (u32) (div_u64((multi_intX100 * 4), 100) - 32);
+
+	/* Calculation for mpll_frac_den */
+	param->mpll_frac_den = (u32) div_u64(ref_clk_rate,
+		__calc_gcd((u32)pll->vco_current_rate, ref_clk_rate));
+
+	/* Calculation for mpll_frac_quot_i */
+	temp = (temp_rem1 * power_2_17);
+	param->mpll_frac_quot_i =
+		(u32) div_u64_rem(temp, ref_clk_rate, &temp_rem2);
+
+	/* Calculation for mpll_frac_rem */
+	param->mpll_frac_rem = (u32) div_u64(((u64)temp_rem2 *
+		param->mpll_frac_den), ref_clk_rate);
+
+	pr_debug("mpll_ssc_peak_i=%d mpll_stepsize_i=%d mpll_mint_i=%d\n",
+		param->mpll_ssc_peak_i, param->mpll_stepsize_i,
+		param->mpll_mint_i);
+	pr_debug("mpll_frac_den=%d mpll_frac_quot_i=%d mpll_frac_rem=%d",
+		param->mpll_frac_den, param->mpll_frac_quot_i,
+		param->mpll_frac_rem);
+}
+
+static void pll_db_commit_12nm_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_param *param = &pdb->param;
+	char data = 0;
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC0, 0x27);
+
+	data = (param->mpll_mint_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC7, data);
+
+	data = ((param->mpll_mint_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC8, data);
+
+	data = (param->mpll_ssc_peak_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC1, data);
+
+	data = ((param->mpll_ssc_peak_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC2, data);
+
+	data = ((param->mpll_ssc_peak_i & 0xf0000) >> 16);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC3, data);
+
+	data = (param->mpll_stepsize_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC4, data);
+
+	data = ((param->mpll_stepsize_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC5, data);
+
+	data = ((param->mpll_stepsize_i & 0x1f0000) >> 16);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC6, data);
+
+	data = (param->mpll_frac_quot_i & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC10, data);
+
+	data = ((param->mpll_frac_quot_i & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC11, data);
+
+	data = (param->mpll_frac_rem & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC12, data);
+
+	data = ((param->mpll_frac_rem & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC13, data);
+
+	data = (param->mpll_frac_den & 0xff);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC14, data);
+
+	data = ((param->mpll_frac_den & 0xff00) >> 8);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_SSC15, data);
+}
+
 static void pll_db_commit_12nm(struct mdss_pll_resources *pll,
 					struct dsi_pll_db *pdb)
 {
@@ -616,6 +754,9 @@
 	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PRO_DLY_RELOCK, 0x0c);
 	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOCK_DET_MODE_SEL, 0x02);
 
+	if (pll->ssc_en)
+		pll_db_commit_12nm_ssc(pll, pdb);
+
 	pr_debug("pll:%d\n", pll->index);
 	wmb(); /* make sure register committed before preparing the clocks */
 }
@@ -710,7 +851,7 @@
 		return ret;
 	}
 
-	if (pll_is_pll_locked_12nm(pll)) {
+	if (pll_is_pll_locked_12nm(pll, true)) {
 		pll->handoff_resources = true;
 		pll->pll_on = true;
 		c->rate = pll_vco_get_rate_12nm(c);
@@ -756,18 +897,27 @@
 					rc, pll->index);
 			goto error;
 		}
+	}
 
-		data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
-		if (data & BIT(7)) { /* DSI PHY in LP-11 or ULPS */
-			rc = dsi_pll_relock(pll);
-			if (rc)
-				goto error;
-			else
-				goto end;
-		}
+	/*
+	 * For cases where the DSI PHY is already enabled, such as:
+	 * 1.) LP-11 during static screen
+	 * 2.) ULPS during static screen
+	 * 3.) Boot-up with cont splash enabled, where the PHY is programmed in LK
+	 * execute the re-lock sequence to enable the DSI PLL.
+	 */
+	data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
+	if (data & BIT(7)) {
+		rc = dsi_pll_relock(pll);
+		if (rc)
+			goto error;
+		else
+			goto end;
 	}
 
 	mdss_dsi_pll_12nm_calc_reg(pll, pdb);
+	if (pll->ssc_en)
+		mdss_dsi_pll_12nm_calc_ssc(pll, pdb);
 
 	/* commit DSI vco  */
 	pll_db_commit_12nm(pll, pdb);
@@ -802,6 +952,7 @@
 {
 	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
 	struct mdss_pll_resources *pll = vco->priv;
+	u32 data = 0;
 
 	if (!pll) {
 		pr_err("Dsi pll resources are not available\n");
@@ -813,7 +964,9 @@
 		return -EINVAL;
 	}
 
-	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, 0x40);
+	data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC0);
+	data |= BIT(6); /* enable GP_CLK_EN */
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, data);
 	wmb(); /* make sure register committed before enabling branch clocks */
 
 	return 0;
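
__mdss_dsi_get_multi_intX100() above expresses vco_rate / 19.2 MHz as an integer scaled by 100, floored to steps of 0.25 (multiples of 25), and returns the portion of the remainder that the flooring discards; __calc_gcd() is plain recursive Euclid. The worked userspace sketch below reproduces that arithmetic with illustrative names and an example 1 GHz VCO rate.

/* Quarter-step (x100) flooring of vco_rate / ref_clk plus Euclid's gcd. */
#include <stdio.h>
#include <stdint.h>

static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
	return b ? gcd_u32(b, a % b) : a;
}

static uint32_t multi_int_x100(uint64_t vco_rate, uint32_t ref_clk,
			       uint32_t *rem)
{
	uint64_t whole = vco_rate / ref_clk;	/* integer multiple */
	uint32_t r = (uint32_t)(vco_rate % ref_clk);
	uint32_t quarter = ref_clk / 4;
	uint32_t steps = r / quarter;		/* 0..3 quarter steps */

	*rem = r - steps * quarter;		/* what the flooring drops */
	return (uint32_t)(whole * 100 + steps * 25);
}

int main(void)
{
	const uint32_t ref = 19200000;
	uint32_t rem;
	uint64_t vco = 1000000000ULL;		/* example 1 GHz VCO */

	/* 1 GHz / 19.2 MHz = 52.083..., floored to 52.00 -> 5200 */
	printf("multi_intX100 = %u, rem = %u\n",
	       multi_int_x100(vco, ref, &rem), rem);
	printf("gcd(%u, %u) = %u\n", (uint32_t)vco, ref,
	       gcd_u32((uint32_t)vco, ref));
	return 0;
}
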
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
index 210742b..5d2fa9a 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
@@ -636,11 +636,13 @@
 };
 
 static struct clk_lookup mdss_dsi_pllcc_12nm[] = {
+	CLK_LIST(dsi0pll_vco_clk),
 	CLK_LIST(dsi0pll_byte_clk_src),
 	CLK_LIST(dsi0pll_pixel_clk_src),
 };
 
 static struct clk_lookup mdss_dsi_pllcc_12nm_1[] = {
+	CLK_LIST(dsi1pll_vco_clk),
 	CLK_LIST(dsi1pll_byte_clk_src),
 	CLK_LIST(dsi1pll_pixel_clk_src),
 };
@@ -650,6 +652,9 @@
 {
 	int rc = 0, ndx;
 	struct dsi_pll_db *pdb;
+	int const ssc_freq_min = 30000; /* min. recommended freq. value */
+	int const ssc_freq_max = 33000; /* max. recommended freq. value */
+	int const ssc_ppm_max = 5000; /* max. recommended ppm */
 
 	if (!pdev || !pdev->dev.of_node) {
 		pr_err("Invalid input parameters\n");
@@ -680,6 +685,21 @@
 	pixel_div_clk_src_ops = clk_ops_div;
 	pixel_div_clk_src_ops.prepare = dsi_pll_div_prepare;
 
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
+			(pll_res->ssc_freq > ssc_freq_max)) {
+			pll_res->ssc_freq = ssc_freq_min;
+			pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
+				pll_res->ssc_freq);
+		}
+
+		if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
+			pll_res->ssc_ppm = ssc_ppm_max;
+			pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
+				pll_res->ssc_ppm);
+		}
+	}
+
 	/* Set client data to mux, div and vco clocks.  */
 	if (pll_res->index == DSI_PLL_1) {
 		dsi1pll_byte_clk_src.priv = pll_res;
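
The probe change above keeps ssc_freq inside the recommended 30-33 kHz window and caps ssc_ppm at 5000 before SSC is programmed. A minimal sketch of that sanitization follows; the constants are copied from the hunk and sanitize_ssc() is an invented helper, not driver code.

/* Clamp SSC parameters to the recommended ranges. */
#include <stdio.h>

#define SSC_FREQ_MIN	30000	/* Hz */
#define SSC_FREQ_MAX	33000	/* Hz */
#define SSC_PPM_MAX	5000

static void sanitize_ssc(int *freq, int *ppm)
{
	if (*freq < SSC_FREQ_MIN || *freq > SSC_FREQ_MAX)
		*freq = SSC_FREQ_MIN;	/* fall back to the minimum */
	if (*ppm <= 0 || *ppm > SSC_PPM_MAX)
		*ppm = SSC_PPM_MAX;	/* fall back to the maximum */
}

int main(void)
{
	int freq = 0, ppm = 9000;	/* out-of-range DT values */

	sanitize_ssc(&freq, &ppm);
	printf("ssc_freq=%d ssc_ppm=%d\n", freq, ppm);	/* 30000, 5000 */
	return 0;
}
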
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
index 6912ff4..0974717 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
@@ -37,6 +37,22 @@
 #define DSIPHY_PLL_LOOP_DIV_RATIO_1			0x2e8
 #define DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_1		0x328
 #define DSIPHY_SSC0					0x394
+#define DSIPHY_SSC7					0x3b0
+#define DSIPHY_SSC8					0x3b4
+#define DSIPHY_SSC1					0x398
+#define DSIPHY_SSC2					0x39c
+#define DSIPHY_SSC3					0x3a0
+#define DSIPHY_SSC4					0x3a4
+#define DSIPHY_SSC5					0x3a8
+#define DSIPHY_SSC6					0x3ac
+#define DSIPHY_SSC10					0x360
+#define DSIPHY_SSC11					0x364
+#define DSIPHY_SSC12					0x368
+#define DSIPHY_SSC13					0x36c
+#define DSIPHY_SSC14					0x370
+#define DSIPHY_SSC15					0x374
 #define DSIPHY_SSC9					0x3b8
 #define DSIPHY_STAT0					0x3e0
 #define DSIPHY_CTRL0					0x3e8
@@ -58,6 +74,14 @@
 	u32 post_div_mux;
 	u32 pixel_divhf;
 	u32 fsm_ovr_ctrl;
+
+	/* ssc_params */
+	u32 mpll_ssc_peak_i;
+	u32 mpll_stepsize_i;
+	u32 mpll_mint_i;
+	u32 mpll_frac_den;
+	u32 mpll_frac_quot_i;
+	u32 mpll_frac_rem;
 };
 
 enum {
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 836c25c..7f9ba03 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -833,6 +833,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(8000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 6),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 2),
 	F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index d426691..316ac39 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2017-2018,
+ * The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -30,7 +31,9 @@
 struct qcom_cc {
 	struct qcom_reset_controller reset;
 	struct clk_regmap **rclks;
+	struct clk_hw **hwclks;
 	size_t num_rclks;
+	size_t num_hwclks;
 };
 
 const
@@ -182,11 +185,14 @@
 	struct qcom_cc *cc = data;
 	unsigned int idx = clkspec->args[0];
 
-	if (idx >= cc->num_rclks) {
+	if (idx >= cc->num_rclks + cc->num_hwclks) {
 		pr_err("invalid index %u\n", idx);
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (idx < cc->num_hwclks && cc->hwclks[idx])
+		return cc->hwclks[idx];
+
 	return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
 }
 
@@ -199,7 +205,9 @@
 	struct qcom_cc *cc;
 	struct gdsc_desc *scd;
 	size_t num_clks = desc->num_clks;
+	size_t num_hwclks = desc->num_hwclks;
 	struct clk_regmap **rclks = desc->clks;
+	struct clk_hw **hwclks = desc->hwclks;
 
 	cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
 	if (!cc)
@@ -207,6 +215,17 @@
 
 	cc->rclks = rclks;
 	cc->num_rclks = num_clks;
+	cc->hwclks = hwclks;
+	cc->num_hwclks = num_hwclks;
+
+	for (i = 0; i < num_hwclks; i++) {
+		if (!hwclks[i])
+			continue;
+
+		ret = devm_clk_hw_register(dev, hwclks[i]);
+		if (ret)
+			return ret;
+	}
 
 	for (i = 0; i < num_clks; i++) {
 		if (!rclks[i])
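
The qcom common.c change above serves clock-cell indices from two parallel tables: entries below num_hwclks can come straight from hwclks[], while everything else falls back to the rclks[] regmap clocks, after a combined bounds check. The sketch below shows that lookup shape with a hypothetical handle type standing in for struct clk_hw.

/* Two-table clock lookup keyed by a single index space. */
#include <stdio.h>
#include <stddef.h>

struct handle { const char *name; };

static struct handle *lookup(unsigned int idx,
			     struct handle **hwclks, size_t num_hwclks,
			     struct handle **rclks, size_t num_rclks)
{
	if (idx >= num_rclks + num_hwclks) {
		fprintf(stderr, "invalid index %u\n", idx);
		return NULL;
	}

	/* Prefer the directly registered clk_hw entry if one exists. */
	if (idx < num_hwclks && hwclks[idx])
		return hwclks[idx];

	/* Otherwise fall back to the regmap clock table. */
	return (idx < num_rclks) ? rclks[idx] : NULL;
}

int main(void)
{
	struct handle hw0 = { "measure_only_cnoc_clk" };
	struct handle rc1 = { "gcc_aggre_ufs_phy_axi_clk" };
	struct handle *hwclks[] = { &hw0, NULL };
	struct handle *rclks[]  = { NULL, &rc1 };
	struct handle *h = lookup(1, hwclks, 2, rclks, 2);

	printf("%s\n", h ? h->name : "(none)");
	return 0;
}
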
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 5e26763..29c4697 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -28,7 +28,9 @@
 struct qcom_cc_desc {
 	const struct regmap_config *config;
 	struct clk_regmap **clks;
+	struct clk_hw **hwclks;
 	size_t num_clks;
+	size_t num_hwclks;
 	const struct qcom_reset_map *resets;
 	size_t num_resets;
 	struct gdsc **gdscs;
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 555b8bd..7ea5d9d 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #include "reset.h"
 #include "clk-alpha-pll.h"
 #include "vdd-level-sdm845.h"
+#include "clk-voter.h"
 
 #define GCC_MMSS_MISC				0x09FFC
 #define GCC_GPU_MISC				0x71028
@@ -1505,6 +1506,11 @@
 	},
 };
 
+static DEFINE_CLK_VOTER(ufs_phy_axi_emmc_vote_clk,
+					gcc_aggre_ufs_phy_axi_clk, 0);
+static DEFINE_CLK_VOTER(ufs_phy_axi_ufs_vote_clk,
+					gcc_aggre_ufs_phy_axi_clk, 0);
+
 static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
 	.halt_reg = 0x82024,
 	.clkr = {
@@ -3780,6 +3786,8 @@
 	[MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
 	[MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw,
 	[MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw,
+	[UFS_PHY_AXI_EMMC_VOTE_CLK] = &ufs_phy_axi_emmc_vote_clk.hw,
+	[UFS_PHY_AXI_UFS_VOTE_CLK] = &ufs_phy_axi_ufs_vote_clk.hw,
 };
 
 static struct clk_regmap *gcc_sdm845_clocks[] = {
@@ -4061,6 +4069,8 @@
 	.config = &gcc_sdm845_regmap_config,
 	.clks = gcc_sdm845_clocks,
 	.num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+	.hwclks = gcc_sdm845_hws,
+	.num_hwclks = ARRAY_SIZE(gcc_sdm845_hws),
 	.resets = gcc_sdm845_resets,
 	.num_resets = ARRAY_SIZE(gcc_sdm845_resets),
 };
@@ -4279,9 +4289,8 @@
 
 static int gcc_sdm845_probe(struct platform_device *pdev)
 {
-	struct clk *clk;
 	struct regmap *regmap;
-	int i, ret = 0;
+	int ret = 0;
 
 	regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
 	if (IS_ERR(regmap))
@@ -4307,13 +4316,6 @@
 	if (ret)
 		return ret;
 
-	/* Register the dummy measurement clocks */
-	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
-		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
-		if (IS_ERR(clk))
-			return PTR_ERR(clk);
-	}
-
 	ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index 0899138..0d7ed80 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,10 @@
 #define PWR_ON_MASK		BIT(31)
 #define CLK_DIS_WAIT_MASK	(0xF << 12)
 #define CLK_DIS_WAIT_SHIFT	(12)
+#define EN_FEW_WAIT_MASK	(0xF << 16)
+#define EN_FEW_WAIT_SHIFT	(16)
+#define EN_REST_WAIT_MASK	(0xF << 20)
+#define EN_REST_WAIT_SHIFT	(20)
 #define SW_OVERRIDE_MASK	BIT(2)
 #define HW_CONTROL_MASK		BIT(1)
 #define SW_COLLAPSE_MASK	BIT(0)
@@ -535,6 +539,7 @@
 	struct resource *res;
 	struct gdsc *sc;
 	uint32_t regval, clk_dis_wait_val = 0;
+	uint32_t en_few_wait_val, en_rest_wait_val;
 	bool retain_mem, retain_periph, support_hw_trigger, prop_val;
 	int i, ret;
 	u32 timeout;
@@ -681,6 +686,22 @@
 		regval |= clk_dis_wait_val;
 	}
 
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,en-few-wait-val",
+				  &en_few_wait_val)) {
+		en_few_wait_val <<= EN_FEW_WAIT_SHIFT;
+
+		regval &= ~(EN_FEW_WAIT_MASK);
+		regval |= en_few_wait_val;
+	}
+
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,en-rest-wait-val",
+				  &en_rest_wait_val)) {
+		en_rest_wait_val <<= EN_REST_WAIT_SHIFT;
+
+		regval &= ~(EN_REST_WAIT_MASK);
+		regval |= en_rest_wait_val;
+	}
+
 	regmap_write(sc->regmap, REG_OFFSET, regval);
 
 	sc->no_status_check_on_disable =
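
The GDSC probe change above reads the optional qcom,en-few-wait-val and qcom,en-rest-wait-val properties and folds them into the control word with the usual clear-mask-then-or update, next to the existing CLK_DIS_WAIT field. A tiny sketch of that field update follows; set_field() is an illustrative helper, not driver code.

/* Clear-then-set update of bit fields inside one control word. */
#include <stdio.h>
#include <stdint.h>

#define EN_FEW_WAIT_SHIFT	16
#define EN_FEW_WAIT_MASK	(0xFu << EN_FEW_WAIT_SHIFT)
#define EN_REST_WAIT_SHIFT	20
#define EN_REST_WAIT_MASK	(0xFu << EN_REST_WAIT_SHIFT)

static uint32_t set_field(uint32_t regval, uint32_t mask,
			  unsigned int shift, uint32_t val)
{
	regval &= ~mask;			/* drop the old field */
	regval |= (val << shift) & mask;	/* install the new one */
	return regval;
}

int main(void)
{
	uint32_t regval = 0xFFFFFFFFu;		/* pretend register contents */

	regval = set_field(regval, EN_FEW_WAIT_MASK, EN_FEW_WAIT_SHIFT, 0x2);
	regval = set_field(regval, EN_REST_WAIT_MASK, EN_REST_WAIT_SHIFT, 0x5);
	printf("regval = 0x%08x\n", regval);	/* fields now 0x2 and 0x5 */
	return 0;
}
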
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 077fcdc..fe7d9ed 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -58,6 +58,12 @@
 	u16 degrees;
 	u32 delay_num = 0;
 
+	/* See the comment for rockchip_mmc_set_phase below */
+	if (!rate) {
+		pr_err("%s: invalid clk rate\n", __func__);
+		return -EINVAL;
+	}
+
 	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
 
 	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -84,6 +90,23 @@
 	u32 raw_value;
 	u32 delay;
 
+	/*
+	 * The calculation below is based on the output clock from the
+	 * MMC host to the card, which expects the phase clock to inherit
+	 * the clock rate from its parent, namely the output clock
+	 * provider of the MMC host. However, things may go wrong if
+	 * (1) it is an orphan, or
+	 * (2) it is assigned to the wrong parent.
+	 *
+	 * This check helps debug case (1), which seems to be the most
+	 * common problem we face and which makes unstable mmc tuning
+	 * results difficult to debug.
+	 */
+	if (!rate) {
+		pr_err("%s: invalid clk rate\n", __func__);
+		return -EINVAL;
+	}
+
 	nineties = degrees / 90;
 	remainder = (degrees % 90);
 
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index db6e5a9..53f16ef 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -369,7 +369,7 @@
 			RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
 			RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
-	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
 			RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
 			RK2928_CLKGATE_CON(2), 11, GFLAGS),
 
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 1b81e28..ed36728 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -698,7 +698,7 @@
 	PLL_36XX_RATE(144000000,  96, 2, 3,     0),
 	PLL_36XX_RATE( 96000000, 128, 2, 4,     0),
 	PLL_36XX_RATE( 84000000, 112, 2, 4,     0),
-	PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
+	PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
 	PLL_36XX_RATE( 73728000,  98, 2, 4, 19923),
 	PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
 	PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
@@ -734,7 +734,7 @@
 	PLL_36XX_RATE(148352005,  98, 2, 3, 59070),
 	PLL_36XX_RATE(108000000, 144, 2, 4,     0),
 	PLL_36XX_RATE( 74250000,  99, 2, 4,     0),
-	PLL_36XX_RATE( 74176002,  98, 3, 4, 59070),
+	PLL_36XX_RATE( 74176002,  98, 2, 4, 59070),
 	PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
 	PLL_36XX_RATE( 54000000, 144, 2, 5,     0),
 	{ /* sentinel */ }
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 27a227d..6a0cb8a 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -711,13 +711,13 @@
 	/* sorted in descending order */
 	/* PLL_36XX_RATE(rate, m, p, s, k) */
 	PLL_36XX_RATE(192000000, 64, 2, 2, 0),
-	PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
+	PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
 	PLL_36XX_RATE(180000000, 90, 3, 2, 0),
 	PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
-	PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
+	PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
 	PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
-	PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
-	PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+	PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
+	PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
 	{ },
 };
 
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index fd1d9bf..8eae175 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -65,7 +65,7 @@
 	PLL_36XX_RATE(480000000, 160, 2, 2, 0),
 	PLL_36XX_RATE(432000000, 144, 2, 2, 0),
 	PLL_36XX_RATE(400000000, 200, 3, 2, 0),
-	PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
+	PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
 	PLL_36XX_RATE(333000000, 111, 2, 2, 0),
 	PLL_36XX_RATE(300000000, 100, 2, 2, 0),
 	PLL_36XX_RATE(266000000, 266, 3, 3, 0),
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 2fe0573..09cdd35 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -725,7 +725,7 @@
 	PLL_35XX_RATE(800000000U,  400, 6,  1),
 	PLL_35XX_RATE(733000000U,  733, 12, 1),
 	PLL_35XX_RATE(700000000U,  175, 3,  1),
-	PLL_35XX_RATE(667000000U,  222, 4,  1),
+	PLL_35XX_RATE(666000000U,  222, 4,  1),
 	PLL_35XX_RATE(633000000U,  211, 4,  1),
 	PLL_35XX_RATE(600000000U,  500, 5,  2),
 	PLL_35XX_RATE(552000000U,  460, 5,  2),
@@ -751,12 +751,12 @@
 /* AUD_PLL */
 static const struct samsung_pll_rate_table exynos5443_aud_pll_rates[] __initconst = {
 	PLL_36XX_RATE(400000000U, 200, 3, 2,      0),
-	PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
+	PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
 	PLL_36XX_RATE(384000000U, 128, 2, 2,      0),
-	PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
-	PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
-	PLL_36XX_RATE(338688000U, 113, 2, 2,  -6816),
-	PLL_36XX_RATE(294912000U,  98, 1, 3,  19923),
+	PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
+	PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
+	PLL_36XX_RATE(338687988U, 113, 2, 2,  -6816),
+	PLL_36XX_RATE(294912002U,  98, 1, 3,  19923),
 	PLL_36XX_RATE(288000000U,  96, 1, 3,      0),
 	PLL_36XX_RATE(252000000U,  84, 1, 3,      0),
 	{ /* sentinel */ }
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 5931a41..bbfa57b 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -140,7 +140,7 @@
 };
 
 static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
-	PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+	PLL_36XX_RATE(491519897, 20, 1, 0, 31457),
 	{},
 };
 
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index d7a1e77..5f50037 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -168,7 +168,7 @@
 	PLL_35XX_RATE(226000000, 105, 1, 1),
 	PLL_35XX_RATE(210000000, 132, 2, 1),
 	/* 2410 common */
-	PLL_35XX_RATE(203000000, 161, 3, 1),
+	PLL_35XX_RATE(202800000, 161, 3, 1),
 	PLL_35XX_RATE(192000000, 88, 1, 1),
 	PLL_35XX_RATE(186000000, 85, 1, 1),
 	PLL_35XX_RATE(180000000, 82, 1, 1),
@@ -178,18 +178,18 @@
 	PLL_35XX_RATE(147000000, 90, 2, 1),
 	PLL_35XX_RATE(135000000, 82, 2, 1),
 	PLL_35XX_RATE(124000000, 116, 1, 2),
-	PLL_35XX_RATE(118000000, 150, 2, 2),
+	PLL_35XX_RATE(118500000, 150, 2, 2),
 	PLL_35XX_RATE(113000000, 105, 1, 2),
-	PLL_35XX_RATE(101000000, 127, 2, 2),
+	PLL_35XX_RATE(101250000, 127, 2, 2),
 	PLL_35XX_RATE(90000000, 112, 2, 2),
-	PLL_35XX_RATE(85000000, 105, 2, 2),
+	PLL_35XX_RATE(84750000, 105, 2, 2),
 	PLL_35XX_RATE(79000000, 71, 1, 2),
-	PLL_35XX_RATE(68000000, 82, 2, 2),
-	PLL_35XX_RATE(56000000, 142, 2, 3),
+	PLL_35XX_RATE(67500000, 82, 2, 2),
+	PLL_35XX_RATE(56250000, 142, 2, 3),
 	PLL_35XX_RATE(48000000, 120, 2, 3),
-	PLL_35XX_RATE(51000000, 161, 3, 3),
+	PLL_35XX_RATE(50700000, 161, 3, 3),
 	PLL_35XX_RATE(45000000, 82, 1, 3),
-	PLL_35XX_RATE(34000000, 82, 2, 3),
+	PLL_35XX_RATE(33750000, 82, 2, 3),
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index b385536..66d1fc7 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1145,6 +1145,8 @@
 	.enable = clk_pllu_enable,
 	.disable = clk_pll_disable,
 	.recalc_rate = clk_pll_recalc_rate,
+	.round_rate = clk_pll_round_rate,
+	.set_rate = clk_pll_set_rate,
 };
 
 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cd6d307..91cf857 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -331,6 +331,15 @@
 	  This option enables support for reading the ARM architected timer's
 	  virtual counter in userspace.
 
+config MSM_TIMER_LEAP
+	bool "ARCH TIMER counter rollover"
+	default n
+	depends on ARM_ARCH_TIMER && ARM64
+	help
+	  This option enables a check for rollover of the least
+	  significant 32 bits of the counter. On every counter read, if
+	  the least significant 32 bits are all set, the counter is
+	  re-read.
+
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select CLKSRC_OF if OF
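
The MSM_TIMER_LEAP help text above describes re-reading the architected counter whenever its least significant 32 bits are all set. The sketch below shows that re-read loop in plain C; read_counter() only simulates the hardware counter read for the example.

/* Re-read the counter while its low 32 bits are all set. */
#include <stdio.h>
#include <stdint.h>

static uint64_t read_counter(void)
{
	static uint64_t fake = 0xFFFFFFFFull;	/* first read hits the case */

	return fake++;
}

static uint64_t read_counter_safe(void)
{
	uint64_t cnt;

	do {
		cnt = read_counter();
	} while ((cnt & 0xFFFFFFFFull) == 0xFFFFFFFFull);

	return cnt;
}

int main(void)
{
	printf("counter = 0x%llx\n",
	       (unsigned long long)read_counter_safe());
	return 0;
}
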
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 738515b..a22c1d7 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -281,7 +281,7 @@
 
 static unsigned long __init ftm_clk_init(struct device_node *np)
 {
-	unsigned long freq;
+	long freq;
 
 	freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
 	if (freq <= 0)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4852d9e..9f09752 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -151,9 +151,19 @@
 	policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
 	policy->shared_type = cpu->shared_type;
 
-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+		int i;
+
 		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
-	else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+
+		for_each_cpu(i, policy->cpus) {
+			if (unlikely(i == policy->cpu))
+				continue;
+
+			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
+			       sizeof(cpu->perf_caps));
+		}
+	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
 		/* Support only SW_ANY for now. */
 		pr_debug("Unsupported CPU co-ord type\n");
 		return -EFAULT;
@@ -218,8 +228,13 @@
 	return ret;
 
 out:
-	for_each_possible_cpu(i)
-		kfree(all_cpu_data[i]);
+	for_each_possible_cpu(i) {
+		cpu = all_cpu_data[i];
+		if (!cpu)
+			break;
+		free_cpumask_var(cpu->shared_cpu_map);
+		kfree(cpu);
+	}
 
 	kfree(all_cpu_data);
 	return -ENODEV;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d82ce73..f474e70 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1409,14 +1409,14 @@
 	return 0;
 
 out_exit_policy:
+	for_each_cpu(j, policy->real_cpus)
+		remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
 	up_write(&policy->rwsem);
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 
-	for_each_cpu(j, policy->real_cpus)
-		remove_cpu_dev_symlink(policy, get_cpu_device(j));
-
 out_free_policy:
 	cpufreq_policy_free(policy, !new_policy);
 	return ret;
@@ -1506,6 +1506,9 @@
 		policy->freq_table = NULL;
 	}
 
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_STOP, policy);
+
 unlock:
 	up_write(&policy->rwsem);
 	return 0;
@@ -1654,7 +1657,10 @@
 
 	if (policy) {
 		down_read(&policy->rwsem);
-		ret_freq = __cpufreq_get(policy);
+
+		if (!policy_is_inactive(policy))
+			ret_freq = __cpufreq_get(policy);
+
 		up_read(&policy->rwsem);
 
 		cpufreq_cpu_put(policy);
@@ -2397,6 +2403,11 @@
 
 	down_write(&policy->rwsem);
 
+	if (policy_is_inactive(policy)) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
 	pr_debug("updating policy for CPU %u\n", cpu);
 	memcpy(&new_policy, policy, sizeof(*policy));
 	new_policy.min = policy->user_policy.min;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6fb3cd2..a1d7fa4 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -599,6 +599,16 @@
 
 	if (!spin_trylock(&gpstates->gpstate_lock))
 		return;
+	/*
+	 * If the timer has migrated to a different cpu, bring it back
+	 * to one of the policy->cpus.
+	 */
+	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+		gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+		add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+		spin_unlock(&gpstates->gpstate_lock);
+		return;
+	}
 
 	gpstates->last_sampled_time += time_diff;
 	gpstates->elapsed_time += time_diff;
@@ -626,10 +636,8 @@
 	gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
 	gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
 
+	set_pstate(&freq_data);
 	spin_unlock(&gpstates->gpstate_lock);
-
-	/* Timer may get migrated to a different cpu on cpu hot unplug */
-	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
 }
 
 /*
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0..f521448 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -4,6 +4,7 @@
 config ARM_CPUIDLE
         bool "Generic ARM/ARM64 CPU idle Driver"
         select DT_IDLE_STATES
+	select CPU_IDLE_MULTIPLE_DRIVERS
         help
           Select this to enable generic cpuidle driver for ARM.
           It provides a generic idle driver whose idle states are configured
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cb3c48a..99ad22e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -27,4 +27,8 @@
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)		+= cpuidle-powernv.o
+ifeq ($(CONFIG_MSM_PM_LEGACY), y)
+obj-y += lpm-levels-legacy.o lpm-levels-of-legacy.o lpm-workarounds.o
+else
 obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o
+endif
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index f440d38..f47c545 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -44,7 +45,7 @@
 	return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
 }
 
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
 	.name = "arm_idle",
 	.owner = THIS_MODULE,
 	/*
@@ -80,30 +81,42 @@
 static int __init arm_idle_init(void)
 {
 	int cpu, ret;
-	struct cpuidle_driver *drv = &arm_idle_driver;
+	struct cpuidle_driver *drv;
 	struct cpuidle_device *dev;
 
-	/*
-	 * Initialize idle states data, starting at index 1.
-	 * This driver is DT only, if no DT idle states are detected (ret == 0)
-	 * let the driver initialization fail accordingly since there is no
-	 * reason to initialize the idle driver if only wfi is supported.
-	 */
-	ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
-	if (ret <= 0)
-		return ret ? : -ENODEV;
-
-	ret = cpuidle_register_driver(drv);
-	if (ret) {
-		pr_err("Failed to register cpuidle driver\n");
-		return ret;
-	}
-
-	/*
-	 * Call arch CPU operations in order to initialize
-	 * idle states suspend back-end specific data
-	 */
 	for_each_possible_cpu(cpu) {
+
+		drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+		if (!drv) {
+			ret = -ENOMEM;
+			goto out_fail;
+		}
+
+		drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+		/*
+		 * Initialize idle states data, starting at index 1.  This
+		 * driver is DT only, if no DT idle states are detected (ret
+		 * == 0) let the driver initialization fail accordingly since
+		 * there is no reason to initialize the idle driver if only
+		 * wfi is supported.
+		 */
+		ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+		if (ret <= 0) {
+			ret = ret ? : -ENODEV;
+			goto out_kfree_drv;
+		}
+
+		ret = cpuidle_register_driver(drv);
+		if (ret) {
+			pr_err("Failed to register cpuidle driver\n");
+			goto out_kfree_drv;
+		}
+
+		/*
+		 * Call arch CPU operations in order to initialize
+		 * idle states suspend back-end specific data
+		 */
 		ret = arm_cpuidle_init(cpu);
 
 		/*
@@ -115,14 +128,14 @@
 
 		if (ret) {
 			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-			goto out_fail;
+			goto out_unregister_drv;
 		}
 
 		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 		if (!dev) {
 			pr_err("Failed to allocate cpuidle device\n");
 			ret = -ENOMEM;
-			goto out_fail;
+			goto out_unregister_drv;
 		}
 		dev->cpu = cpu;
 
@@ -130,21 +143,28 @@
 		if (ret) {
 			pr_err("Failed to register cpuidle device for CPU %d\n",
 			       cpu);
-			kfree(dev);
-			goto out_fail;
+			goto out_kfree_dev;
 		}
 	}
 
 	return 0;
+
+out_kfree_dev:
+	kfree(dev);
+out_unregister_drv:
+	cpuidle_unregister_driver(drv);
+out_kfree_drv:
+	kfree(drv);
 out_fail:
 	while (--cpu >= 0) {
 		dev = per_cpu(cpuidle_devices, cpu);
+		drv = cpuidle_get_cpu_driver(dev);
 		cpuidle_unregister_device(dev);
+		cpuidle_unregister_driver(drv);
 		kfree(dev);
+		kfree(drv);
 	}
 
-	cpuidle_unregister_driver(drv);
-
 	return ret;
 }
 device_initcall(arm_idle_init);
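
The reworked arm_idle_init() above allocates a driver and a device per CPU and unwinds failures through a ladder of goto labels, releasing only what was already set up, in reverse order. The compact userspace sketch below shows the same unwinding shape with dummy resources; register_driver() and the failure on CPU 3 are invented for illustration.

/* goto-based unwinding: undo only what has already succeeded. */
#include <stdio.h>
#include <stdlib.h>

static int register_driver(int cpu) { return cpu == 3 ? -1 : 0; }

static int setup_cpu(int cpu)
{
	char *drv = NULL, *dev = NULL;
	int ret;

	drv = malloc(16);
	if (!drv) {
		ret = -1;
		goto out_fail;
	}

	ret = register_driver(cpu);
	if (ret)
		goto out_kfree_drv;

	dev = malloc(16);
	if (!dev) {
		ret = -1;
		goto out_unregister_drv;
	}

	/* drv and dev intentionally stay allocated on success, like the
	 * kernel's per-CPU driver and device objects. */
	printf("cpu %d ready\n", cpu);
	return 0;

out_unregister_drv:
	/* unregister_driver(cpu) would go here */
out_kfree_drv:
	free(drv);
out_fail:
	fprintf(stderr, "cpu %d failed (%d)\n", cpu, ret);
	return ret;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		if (setup_cpu(cpu))
			break;
	return 0;
}
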
diff --git a/drivers/cpuidle/lpm-levels-legacy.c b/drivers/cpuidle/lpm-levels-legacy.c
new file mode 100644
index 0000000..006a5ef
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.c
@@ -0,0 +1,1533 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <linux/remote_spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/coresight-cti.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/cpu_pm.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/event_timer.h>
+#include <soc/qcom/lpm-stats.h>
+#include <soc/qcom/lpm_levels.h>
+#include <soc/qcom/jtag.h>
+#include <asm/cputype.h>
+#include <asm/arch_timer.h>
+#include <asm/cacheflush.h>
+#include <asm/suspend.h>
+#include "lpm-levels-legacy.h"
+#include "lpm-workarounds.h"
+#include <trace/events/power.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_low_power.h>
+#if defined(CONFIG_COMMON_CLK)
+#include "../clk/clk.h"
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include "../../drivers/clk/msm/clock.h"
+#endif /* CONFIG_COMMON_CLK */
+#include <soc/qcom/minidump.h>
+
+#define SCLK_HZ (32768)
+#define SCM_HANDOFF_LOCK_ID "S:7"
+#define PSCI_POWER_STATE(reset) (reset << 30)
+#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
+static remote_spinlock_t scm_handoff_lock;
+
+enum {
+	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+enum debug_event {
+	CPU_ENTER,
+	CPU_EXIT,
+	CLUSTER_ENTER,
+	CLUSTER_EXIT,
+	PRE_PC_CB,
+	CPU_HP_STARTING,
+	CPU_HP_DYING,
+};
+
+struct lpm_debug {
+	cycle_t time;
+	enum debug_event evt;
+	int cpu;
+	uint32_t arg1;
+	uint32_t arg2;
+	uint32_t arg3;
+	uint32_t arg4;
+};
+
+static struct system_pm_ops *sys_pm_ops;
+struct lpm_cluster *lpm_root_node;
+
+static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static bool suspend_in_progress;
+static struct hrtimer lpm_hrtimer;
+static struct lpm_debug *lpm_debug;
+static phys_addr_t lpm_debug_phys;
+
+static const int num_dbg_elements = 0x100;
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+
+static bool menu_select;
+module_param_named(
+	menu_select, menu_select, bool, 0664
+);
+
+static bool print_parsed_dt;
+module_param_named(
+	print_parsed_dt, print_parsed_dt, bool, 0664
+);
+
+static bool sleep_disabled;
+module_param_named(sleep_disabled,
+	sleep_disabled, bool, 0664);
+
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+	return 10;
+}
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
+
+uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
+{
+	if (sys_pm_ops)
+		return -EUSERS;
+
+	sys_pm_ops = pm_ops;
+
+	return 0;
+}
+
+static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
+					struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cluster_level *level;
+	struct lpm_cluster *n;
+	struct power_params *pwr_params;
+	uint32_t latency = 0;
+	int i;
+
+	if (!cluster->list.next) {
+		for (i = 0; i < cluster->nlevels; i++) {
+			level = &cluster->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->latency_us)
+						|| (!latency))
+					latency = pwr_params->latency_us;
+				break;
+			}
+		}
+	} else {
+		list_for_each(list, &cluster->parent->child) {
+			n = list_entry(list, typeof(*n), list);
+			if (lat_level->level_name) {
+				if (strcmp(lat_level->level_name,
+						 n->cluster_name))
+					continue;
+			}
+			for (i = 0; i < n->nlevels; i++) {
+				level = &n->levels[i];
+				pwr_params = &level->pwr;
+				if (lat_level->reset_level ==
+						level->reset_level) {
+					if ((latency > pwr_params->latency_us)
+								|| (!latency))
+						latency =
+						pwr_params->latency_us;
+					break;
+				}
+			}
+		}
+	}
+	return latency;
+}
+
+static uint32_t least_cpu_latency(struct list_head *child,
+				struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cpu_level *level;
+	struct power_params *pwr_params;
+	struct lpm_cpu *cpu;
+	struct lpm_cluster *n;
+	uint32_t latency = 0;
+	int i;
+
+	list_for_each(list, child) {
+		n = list_entry(list, typeof(*n), list);
+		if (lat_level->level_name) {
+			if (strcmp(lat_level->level_name, n->cluster_name))
+				continue;
+		}
+		cpu = n->cpu;
+		for (i = 0; i < cpu->nlevels; i++) {
+			level = &cpu->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->latency_us)
+							|| (!latency))
+					latency = pwr_params->latency_us;
+				break;
+			}
+		}
+	}
+	return latency;
+}
+
+static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
+							int affinity_level)
+{
+	struct lpm_cluster *n;
+
+	if ((cluster->aff_level == affinity_level)
+		|| ((cluster->cpu) && (affinity_level == 0)))
+		return cluster;
+	else if (!cluster->cpu) {
+		n =  list_entry(cluster->child.next, typeof(*n), list);
+		return cluster_aff_match(n, affinity_level);
+	} else
+		return NULL;
+}
+
+int lpm_get_latency(struct latency_level *level, uint32_t *latency)
+{
+	struct lpm_cluster *cluster;
+	uint32_t val;
+
+	if (!lpm_root_node) {
+		pr_err("%s: lpm_probe not completed\n", __func__);
+		return -EAGAIN;
+	}
+
+	if ((level->affinity_level < 0)
+		|| (level->affinity_level > lpm_root_node->aff_level)
+		|| (level->reset_level < LPM_RESET_LVL_RET)
+		|| (level->reset_level > LPM_RESET_LVL_PC)
+		|| !latency)
+		return -EINVAL;
+
+	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
+	if (!cluster) {
+		pr_err("%s:No matching cluster found for affinity_level:%d\n",
+					__func__, level->affinity_level);
+		return -EINVAL;
+	}
+
+	if (level->affinity_level == 0)
+		val = least_cpu_latency(&cluster->parent->child, level);
+	else
+		val = least_cluster_latency(cluster, level);
+
+	if (!val) {
+		pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
+			__func__, level->affinity_level, level->reset_level);
+		return -EINVAL;
+	}
+
+	*latency = val;
+
+	return 0;
+}
+EXPORT_SYMBOL(lpm_get_latency);
+
+static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
+		uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+	struct lpm_debug *dbg;
+	int idx;
+	static DEFINE_SPINLOCK(debug_lock);
+	static int pc_event_index;
+
+	if (!lpm_debug)
+		return;
+
+	spin_lock(&debug_lock);
+	idx = pc_event_index++;
+	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
+
+	dbg->evt = event;
+	dbg->time = arch_counter_get_cntpct();
+	dbg->cpu = raw_smp_processor_id();
+	dbg->arg1 = arg1;
+	dbg->arg2 = arg2;
+	dbg->arg3 = arg3;
+	dbg->arg4 = arg4;
+	spin_unlock(&debug_lock);
+}
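+
+/*
+ * update_debug_pc_event() above records events into a fixed ring of
+ * num_dbg_elements entries under a spinlock, wrapping the index with
+ * idx & (num_dbg_elements - 1), which works because the size is a
+ * power of two. A userspace sketch of the same wrap-around logging,
+ * with a pthread mutex standing in for the spinlock and invented
+ * names, looks like this:
+ *
+ *	#define NUM_ELEMENTS 0x100	// must stay a power of two
+ *	static struct dbg_event ring[NUM_ELEMENTS];
+ *	static int event_index;
+ *	static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
+ *
+ *	static void log_event(int evt, int cpu, unsigned int arg1)
+ *	{
+ *		int idx;
+ *
+ *		pthread_mutex_lock(&ring_lock);
+ *		idx = event_index++;
+ *		// masking with size-1 wraps the index without a modulo
+ *		ring[idx & (NUM_ELEMENTS - 1)] =
+ *			(struct dbg_event){ evt, cpu, arg1 };
+ *		pthread_mutex_unlock(&ring_lock);
+ *	}
+ */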
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+	return HRTIMER_NORESTART;
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
+	lpm_hrtimer.function = lpm_hrtimer_cb;
+	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+int set_l2_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level)
+{
+	int lpm = mode;
+	int rc = 0;
+	bool notify_rpm = level->notify_rpm;
+	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+			smp_processor_id())->lpm_dev;
+
+	if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
+			cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
+		coresight_cti_ctx_restore();
+
+	switch (mode) {
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_FASTPC:
+		if (level->no_cache_flush)
+			cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+		else
+			cpu_ops->tz_flag = MSM_SCM_L2_OFF;
+		coresight_cti_ctx_save();
+		break;
+	case MSM_SPM_MODE_GDHS:
+		cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+		coresight_cti_ctx_save();
+		break;
+	case MSM_SPM_MODE_CLOCK_GATING:
+	case MSM_SPM_MODE_RETENTION:
+	case MSM_SPM_MODE_DISABLED:
+		cpu_ops->tz_flag = MSM_SCM_L2_ON;
+		break;
+	default:
+		cpu_ops->tz_flag = MSM_SCM_L2_ON;
+		lpm = MSM_SPM_MODE_DISABLED;
+		break;
+	}
+
+	if (lpm_wa_get_skip_l2_spm())
+		rc = msm_spm_config_low_power_mode_addr(ops->spm, lpm,
+							notify_rpm);
+	else
+		rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
+
+	if (rc)
+		pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
+				__func__, lpm, rc);
+
+	return rc;
+}
+
+int set_l3_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level)
+{
+	bool notify_rpm = level->notify_rpm;
+	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+			smp_processor_id())->lpm_dev;
+
+	switch (mode) {
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_FASTPC:
+		cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
+		break;
+	default:
+		break;
+	}
+	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+
+int set_system_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level)
+{
+	bool notify_rpm = level->notify_rpm;
+
+	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
+		struct lpm_cluster_level *level)
+{
+	struct low_power_ops *ops;
+
+	if (use_psci)
+		return 0;
+
+	ops = &cluster->lpm_dev[ndevice];
+	if (ops && ops->set_mode)
+		return ops->set_mode(ops, level->mode[ndevice],
+				level);
+	else
+		return -EINVAL;
+}
+
+static int cpu_power_select(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu)
+{
+	int best_level = 0;
+	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
+							dev->cpu);
+	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
+	uint32_t modified_time_us = 0;
+	uint32_t next_event_us = 0;
+	int i;
+	uint32_t lvl_latency_us = 0;
+	uint32_t *residency = get_per_cpu_max_residency(dev->cpu);
+
+	if (!cpu)
+		return best_level;
+
+	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
+		return 0;
+
+	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct lpm_cpu_level *level = &cpu->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+		uint32_t next_wakeup_us = (uint32_t)sleep_us;
+		enum msm_pm_sleep_mode mode = level->mode;
+		bool allow;
+
+		allow = lpm_cpu_mode_allow(dev->cpu, i, true);
+
+		if (!allow)
+			continue;
+
+		lvl_latency_us = pwr_params->latency_us;
+
+		if (latency_us < lvl_latency_us)
+			break;
+
+		if (next_event_us) {
+			if (next_event_us < lvl_latency_us)
+				break;
+
+			if (((next_event_us - lvl_latency_us) < sleep_us) ||
+					(next_event_us < sleep_us))
+				next_wakeup_us = next_event_us - lvl_latency_us;
+		}
+
+		best_level = i;
+
+		if (next_event_us && next_event_us < sleep_us &&
+				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+			modified_time_us
+				= next_event_us - lvl_latency_us;
+		else
+			modified_time_us = 0;
+
+		if (next_wakeup_us <= residency[i])
+			break;
+	}
+
+	if (modified_time_us)
+		msm_pm_set_timer(modified_time_us);
+
+	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
+
+	return best_level;
+}
+
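+/*
+ * Return the time, in microseconds, until the earliest next timer event
+ * among the online CPUs in sync in this cluster (~0 when not called from
+ * idle). When @mask is non-NULL it is set to the CPU owning that event.
+ */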
+static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
+		struct cpumask *mask, bool from_idle)
+{
+	int cpu;
+	int next_cpu = raw_smp_processor_id();
+	ktime_t next_event;
+	struct cpumask online_cpus_in_cluster;
+
+	next_event.tv64 = KTIME_MAX;
+	if (!from_idle) {
+		if (mask)
+			cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
+		return ~0ULL;
+	}
+
+	cpumask_and(&online_cpus_in_cluster,
+			&cluster->num_children_in_sync, cpu_online_mask);
+
+	for_each_cpu(cpu, &online_cpus_in_cluster) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (next_event_c->tv64 < next_event.tv64) {
+			next_event.tv64 = next_event_c->tv64;
+			next_cpu = cpu;
+		}
+	}
+
+	if (mask)
+		cpumask_copy(mask, cpumask_of(next_cpu));
+
+
+	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
+		return ktime_to_us(ktime_sub(next_event, ktime_get()));
+	else
+		return 0;
+}
+
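+/*
+ * Select the deepest cluster level for which every child has voted, the
+ * aggregated PM QoS latency constraint is met and the predicted cluster
+ * sleep time covers the level's overhead; returns a negative value when
+ * no level qualifies.
+ */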
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+{
+	int best_level = -1;
+	int i;
+	struct cpumask mask;
+	uint32_t latency_us = ~0U;
+	uint32_t sleep_us;
+
+	if (!cluster)
+		return -EINVAL;
+
+	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+
+	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
+		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
+							&mask);
+
+	/*
+	 * If at least one core in the cluster is online, the cluster low
+	 * power mode should be determined by the idle characteristics, even
+	 * if the last core enters the low power mode as part of hotplug.
+	 */
+
+	if (!from_idle && num_online_cpus() > 1 &&
+		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
+		from_idle = true;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *level = &cluster->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+
+		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
+			continue;
+
+		if (level->last_core_only &&
+			cpumask_weight(cpu_online_mask) > 1)
+			continue;
+
+		if (!cpumask_equal(&cluster->num_children_in_sync,
+					&level->num_cpu_votes))
+			continue;
+
+		if (from_idle && latency_us < pwr_params->latency_us)
+			break;
+
+		if (sleep_us < pwr_params->time_overhead_us)
+			break;
+
+		if (suspend_in_progress && from_idle && level->notify_rpm)
+			continue;
+
+		if (level->notify_rpm) {
+			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
+				continue;
+			if (!sys_pm_ops->sleep_allowed())
+				continue;
+		}
+
+		best_level = i;
+
+		if (from_idle && sleep_us <= pwr_params->max_residency)
+			break;
+	}
+
+	return best_level;
+}
+
+static void cluster_notify(struct lpm_cluster *cluster,
+		struct lpm_cluster_level *level, bool enter)
+{
+	if (level->is_reset && enter)
+		cpu_cluster_pm_enter(cluster->aff_level);
+	else if (level->is_reset && !enter)
+		cpu_cluster_pm_exit(cluster->aff_level);
+}
+
+static unsigned int get_next_online_cpu(bool from_idle)
+{
+	unsigned int cpu;
+	ktime_t next_event;
+	unsigned int next_cpu = raw_smp_processor_id();
+
+	if (!from_idle)
+		return next_cpu;
+	next_event.tv64 = KTIME_MAX;
+	for_each_online_cpu(cpu) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (next_event_c->tv64 < next_event.tv64) {
+			next_event.tv64 = next_event_c->tv64;
+			next_cpu = cpu;
+		}
+	}
+	return next_cpu;
+}
+
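+/*
+ * Program the cluster's low power devices for level @idx and, for
+ * RPM-notified levels, hand the next wakeup CPU to the system PM driver.
+ * On a device failure the devices are rolled back to the default level.
+ */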
+static int cluster_configure(struct lpm_cluster *cluster, int idx,
+		bool from_idle)
+{
+	struct lpm_cluster_level *level = &cluster->levels[idx];
+	struct cpumask cpumask;
+	unsigned int cpu;
+	int ret, i;
+
+	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
+			|| is_IPI_pending(&cluster->num_children_in_sync)) {
+		return -EPERM;
+	}
+
+	if (idx != cluster->default_level) {
+		update_debug_pc_event(CLUSTER_ENTER, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		trace_cluster_enter(cluster->cluster_name, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		lpm_stats_cluster_enter(cluster->stats, idx);
+	}
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		ret = set_device_mode(cluster, i, level);
+		if (ret)
+			goto failed_set_mode;
+	}
+
+	if (level->notify_rpm) {
+		struct cpumask *nextcpu;
+
+		cpu = get_next_online_cpu(from_idle);
+		cpumask_copy(&cpumask, cpumask_of(cpu));
+		nextcpu = level->disable_dynamic_routing ? NULL : &cpumask;
+
+		if (sys_pm_ops && sys_pm_ops->enter)
+			if ((sys_pm_ops->enter(nextcpu)))
+				return -EBUSY;
+
+		if (cluster->no_saw_devices && !use_psci)
+			msm_spm_set_rpm_hs(true);
+	}
+
+	/* Notify cluster enter event after successful config completion */
+	cluster_notify(cluster, level, true);
+
+	cluster->last_level = idx;
+	return 0;
+
+failed_set_mode:
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		int rc = 0;
+
+		level = &cluster->levels[cluster->default_level];
+		rc = set_device_mode(cluster, i, level);
+		WARN_ON(rc);
+	}
+	return ret;
+}
+
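+/*
+ * Record this CPU's (or child cluster's) vote for @child_idx and, once all
+ * children are in sync, select and configure a cluster level, recursing up
+ * to the parent cluster.
+ */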
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t start_time)
+{
+	int i;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	cpumask_or(&cluster->num_children_in_sync, cpu,
+			&cluster->num_children_in_sync);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_or(&lvl->num_cpu_votes, cpu,
+					&lvl->num_cpu_votes);
+	}
+
+	/*
+	 * cluster_select() does not make any configuration changes, so it is
+	 * OK to release the lock here. If a core wakes up for a rude request,
+	 * it need not wait for another core to finish its cluster selection
+	 * and configuration process.
+	 */
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto failed;
+
+	i = cluster_select(cluster, from_idle);
+
+	if (i < 0)
+		goto failed;
+
+	if (cluster_configure(cluster, i, from_idle))
+		goto failed;
+
+	cluster->stats->sleep_time = start_time;
+	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
+			from_idle, start_time);
+
+	spin_unlock(&cluster->sync_lock);
+
+	if (!use_psci) {
+		struct lpm_cluster_level *level = &cluster->levels[i];
+
+		if (level->notify_rpm)
+			if (sys_pm_ops && sys_pm_ops->update_wakeup)
+				sys_pm_ops->update_wakeup(from_idle);
+	}
+
+	return;
+failed:
+	spin_unlock(&cluster->sync_lock);
+	cluster->stats->sleep_time = 0;
+}
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t end_time)
+{
+	struct lpm_cluster_level *level;
+	bool first_cpu;
+	int last_level, i, ret;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	last_level = cluster->default_level;
+	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus);
+	cpumask_andnot(&cluster->num_children_in_sync,
+			&cluster->num_children_in_sync, cpu);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_andnot(&lvl->num_cpu_votes,
+					&lvl->num_cpu_votes, cpu);
+	}
+
+	if (!first_cpu || cluster->last_level == cluster->default_level)
+		goto unlock_return;
+
+	if (cluster->stats->sleep_time)
+		cluster->stats->sleep_time = end_time -
+			cluster->stats->sleep_time;
+	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+
+	level = &cluster->levels[cluster->last_level];
+	if (level->notify_rpm) {
+		if (sys_pm_ops && sys_pm_ops->exit)
+			sys_pm_ops->exit();
+
+		/*
+		 * If RPM bumps CX up to turbo, remove the CX turbo vote when
+		 * exiting RPM-assisted power collapse to reduce the power
+		 * impact.
+		 */
+		lpm_wa_cx_unvote_send();
+
+		if (cluster->no_saw_devices && !use_psci)
+			msm_spm_set_rpm_hs(false);
+
+	}
+
+	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+
+	last_level = cluster->last_level;
+	cluster->last_level = cluster->default_level;
+
+	for (i = 0; i < cluster->ndevices; i++) {
+		level = &cluster->levels[cluster->default_level];
+		ret = set_device_mode(cluster, i, level);
+
+		WARN_ON(ret);
+
+	}
+
+	cluster_notify(cluster, &cluster->levels[last_level], false);
+	cluster_unprepare(cluster->parent, &cluster->child_cpus,
+			last_level, from_idle, end_time);
+unlock_return:
+	spin_unlock(&cluster->sync_lock);
+}
+
+static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+	bool jtag_save_restore =
+			cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+	/*
+	 * Use the broadcast timer for aggregating the sleep mode within a
+	 * cluster. A broadcast timer is used in the following scenarios:
+	 * 1) The architected timer HW gets reset during certain low power
+	 * modes and the core relies on an external (broadcast) timer to wake
+	 * up from sleep. This information is passed through the device tree.
+	 * 2) The CPU low power mode could trigger a system low power mode.
+	 * The low power module relies on the broadcast timer to aggregate the
+	 * next wakeup within a cluster, in which case the CPU switches over
+	 * to the broadcast timer.
+	 */
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_index >= cluster->min_child_level)))
+		tick_broadcast_enter();
+
+	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+		|| (cpu_level->mode ==
+			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+			|| (cpu_level->is_reset)))
+		cpu_pm_enter();
+
+	/*
+	 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+	 */
+	if (jtag_save_restore)
+		msm_jtag_save_state();
+}
+
+static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+	bool jtag_save_restore =
+			cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_index >= cluster->min_child_level)))
+		tick_broadcast_exit();
+
+	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+		|| (cpu_level->mode ==
+			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+		|| cpu_level->is_reset))
+		cpu_pm_exit();
+
+	/*
+	 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+	 */
+	if (jtag_save_restore)
+		msm_jtag_restore_state();
+}
+
+#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7)
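+/*
+ * Walk up the cluster hierarchy and OR each powered-down ancestor's PSCI
+ * mode id into the composite state id, bumping *aff_lvl for every cluster
+ * level that is actually entering a low power state.
+ */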
+static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
+{
+	int state_id = 0;
+
+	if (!cluster)
+		return 0;
+
+	spin_lock(&cluster->sync_lock);
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto unlock_and_return;
+
+	state_id |= get_cluster_id(cluster->parent, aff_lvl);
+
+	if (cluster->last_level != cluster->default_level) {
+		struct lpm_cluster_level *level
+			= &cluster->levels[cluster->last_level];
+
+		state_id |= (level->psci_id & cluster->psci_mode_mask)
+					<< cluster->psci_mode_shift;
+		(*aff_lvl)++;
+	}
+unlock_and_return:
+	spin_unlock(&cluster->sync_lock);
+	return state_id;
+}
+#endif
+
+#if !defined(CONFIG_CPU_V7)
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
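+/*
+ * Enter the requested low power state via PSCI. idx 0 is plain WFI; for
+ * deeper states the state id passed to arm_cpuidle_suspend() is the OR of
+ * the cluster bits from get_cluster_id(), the reset-type power state, the
+ * affinity level and the CPU level's psci_id. Levels flagged hyp_psci are
+ * entered through a direct SMC instead.
+ */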
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+					int idx, bool from_idle)
+
+{
+	bool ret;
+	/*
+	 * idx = 0 is the default LPM state
+	 */
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		ret = true;
+	} else {
+		int affinity_level = 0;
+		int state_id = get_cluster_id(cluster, &affinity_level);
+		int power_state =
+			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		bool success = false;
+
+		if (cluster->cpu->levels[idx].hyp_psci) {
+			stop_critical_timings();
+			__invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
+			start_critical_timings();
+			return 1;
+		}
+
+		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+		state_id |= (power_state | affinity_level
+			| cluster->cpu->levels[idx].psci_id);
+
+		update_debug_pc_event(CPU_ENTER, state_id,
+						0xdeaffeed, 0xdeaffeed, true);
+		stop_critical_timings();
+		success = !arm_cpuidle_suspend(state_id);
+		start_critical_timings();
+		update_debug_pc_event(CPU_EXIT, state_id,
+						success, 0xdeaffeed, true);
+		ret = success;
+	}
+	return ret;
+}
+#elif defined(CONFIG_ARM_PSCI)
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+					int idx, bool from_idle)
+{
+	bool ret;
+
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		ret = true;
+	} else {
+		int affinity_level = 0;
+		int state_id = get_cluster_id(cluster, &affinity_level);
+		int power_state =
+			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+		bool success = false;
+
+		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+		state_id |= (power_state | affinity_level
+			| cluster->cpu->levels[idx].psci_id);
+
+		update_debug_pc_event(CPU_ENTER, state_id,
+						0xdeaffeed, 0xdeaffeed, true);
+		stop_critical_timings();
+		success = !arm_cpuidle_suspend(state_id);
+		start_critical_timings();
+		update_debug_pc_event(CPU_EXIT, state_id,
+						success, 0xdeaffeed, true);
+		ret = success;
+	}
+	return ret;
+}
+#else
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+					int idx, bool from_idle)
+{
+	WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
+	return false;
+}
+#endif
+
+static int lpm_cpuidle_select(struct cpuidle_driver *drv,
+		struct cpuidle_device *dev)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	int idx;
+
+	if (!cluster)
+		return 0;
+
+	idx = cpu_power_select(dev, cluster->cpu);
+
+	return idx;
+}
+
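+/*
+ * cpuidle enter hook: prepare the CPU and its cluster hierarchy for @idx,
+ * enter sleep through the legacy SPM path or PSCI, then unwind the
+ * cluster/CPU state and report the residency back to cpuidle.
+ */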
+static int lpm_cpuidle_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int idx)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+	bool success = true;
+	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+	ktime_t start = ktime_get();
+	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
+
+	if (idx < 0)
+		return -EINVAL;
+
+	cpu_prepare(cluster, idx, true);
+	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+
+	trace_cpu_idle_enter(idx);
+	lpm_stats_cpu_enter(idx, start_time);
+
+	if (need_resched())
+		goto exit;
+
+	if (!use_psci) {
+		if (idx > 0)
+			update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+					0xdeaffeed, true);
+		success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
+				true);
+
+		if (idx > 0)
+			update_debug_pc_event(CPU_EXIT, idx, success,
+							0xdeaffeed, true);
+	} else {
+		success = psci_enter_sleep(cluster, idx, true);
+	}
+
+exit:
+	end_time = ktime_to_ns(ktime_get());
+	lpm_stats_cpu_exit(idx, end_time, success);
+
+	cluster_unprepare(cluster, cpumask, idx, true, end_time);
+	cpu_unprepare(cluster, idx, true);
+
+	trace_cpu_idle_exit(idx, success);
+	dev->last_residency = ktime_us_delta(ktime_get(), start);
+	local_irq_enable();
+
+	return idx;
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct cpumask *mask)
+{
+	struct cpuidle_device *device;
+	int cpu, ret;
+
+
+	if (!mask || !drv)
+		return -EINVAL;
+
+	drv->cpumask = mask;
+	ret = cpuidle_register_driver(drv);
+	if (ret) {
+		pr_err("Failed to register cpuidle driver %d\n", ret);
+		goto failed_driver_register;
+	}
+
+	for_each_cpu(cpu, mask) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		device->cpu = cpu;
+
+		ret = cpuidle_register_device(device);
+		if (ret) {
+			pr_err("Failed to register cpuidle driver for cpu:%u\n",
+					cpu);
+			goto failed_driver_register;
+		}
+	}
+	return ret;
+failed_driver_register:
+	for_each_cpu(cpu, mask)
+		cpuidle_unregister_driver(drv);
+	return ret;
+}
+#else
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct  cpumask *mask)
+{
+	return cpuidle_register(drv, NULL);
+}
+#endif
+
+static struct cpuidle_governor lpm_governor = {
+	.name =		"qcom",
+	.rating =	30,
+	.select =	lpm_cpuidle_select,
+	.owner =	THIS_MODULE,
+};
+
+static int cluster_cpuidle_register(struct lpm_cluster *cl)
+{
+	int i = 0, ret = 0;
+	unsigned int cpu;
+	struct lpm_cluster *p = NULL;
+
+	if (!cl->cpu) {
+		struct lpm_cluster *n;
+
+		list_for_each_entry(n, &cl->child, list) {
+			ret = cluster_cpuidle_register(n);
+			if (ret)
+				break;
+		}
+		return ret;
+	}
+
+	cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
+	if (!cl->drv)
+		return -ENOMEM;
+
+	cl->drv->name = "msm_idle";
+
+	for (i = 0; i < cl->cpu->nlevels; i++) {
+		struct cpuidle_state *st = &cl->drv->states[i];
+		struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+
+		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+		snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
+		st->flags = 0;
+		st->exit_latency = cpu_level->pwr.latency_us;
+		st->power_usage = cpu_level->pwr.ss_power;
+		st->target_residency = 0;
+		st->enter = lpm_cpuidle_enter;
+	}
+
+	cl->drv->state_count = cl->cpu->nlevels;
+	cl->drv->safe_state_index = 0;
+	for_each_cpu(cpu, &cl->child_cpus)
+		per_cpu(cpu_cluster, cpu) = cl;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu_online(cpu))
+			continue;
+		p = per_cpu(cpu_cluster, cpu);
+		while (p) {
+			int j;
+
+			spin_lock(&p->sync_lock);
+			cpumask_set_cpu(cpu, &p->num_children_in_sync);
+			for (j = 0; j < p->nlevels; j++)
+				cpumask_copy(&p->levels[j].num_cpu_votes,
+						&p->num_children_in_sync);
+			spin_unlock(&p->sync_lock);
+			p = p->parent;
+		}
+	}
+	ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
+
+	if (ret) {
+		kfree(cl->drv);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * init_lpm - initializes the governor
+ */
+static int __init init_lpm(void)
+{
+	return cpuidle_register_governor(&lpm_governor);
+}
+
+postcore_initcall(init_lpm);
+
+static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+
+	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cpu->nlevels; i++)
+		level_name[i] = cpu->levels[i].name;
+
+	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
+			parent->stats, &parent->child_cpus);
+
+	kfree(level_name);
+}
+
+static void register_cluster_lpm_stats(struct lpm_cluster *cl,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+	struct lpm_cluster *child;
+
+	if (!cl)
+		return;
+
+	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cl->nlevels; i++)
+		level_name[i] = cl->levels[i].level_name;
+
+	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
+			cl->nlevels, parent ? parent->stats : NULL, NULL);
+
+	kfree(level_name);
+
+	if (cl->cpu) {
+		register_cpu_lpm_stats(cl->cpu, cl);
+		return;
+	}
+
+	list_for_each_entry(child, &cl->child, list)
+		register_cluster_lpm_stats(child, cl);
+}
+
+static int lpm_suspend_prepare(void)
+{
+	suspend_in_progress = true;
+	lpm_stats_suspend_enter();
+
+	return 0;
+}
+
+static void lpm_suspend_wake(void)
+{
+	suspend_in_progress = false;
+	lpm_stats_suspend_exit();
+}
+
+static int lpm_suspend_enter(suspend_state_t state)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cpu *lpm_cpu = cluster->cpu;
+	const struct cpumask *cpumask = get_cpu_mask(cpu);
+	int idx;
+
+	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
+
+		if (lpm_cpu_mode_allow(cpu, idx, false))
+			break;
+	}
+	if (idx < 0) {
+		pr_err("Failed suspend\n");
+		return 0;
+	}
+	cpu_prepare(cluster, idx, false);
+	cluster_prepare(cluster, cpumask, idx, false, 0);
+	if (idx > 0)
+		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+					0xdeaffeed, false);
+
+	/*
+	 * Print the clocks that are enabled during system suspend.
+	 * This debug information helps identify which clocks are
+	 * enabled and preventing the system level LPMs (XO and Vmin).
+	 */
+	clock_debug_print_enabled(true);
+
+	if (!use_psci)
+		msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
+	else
+		psci_enter_sleep(cluster, idx, true);
+
+	if (idx > 0)
+		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
+					false);
+
+	cluster_unprepare(cluster, cpumask, idx, false, 0);
+	cpu_unprepare(cluster, idx, false);
+	return 0;
+}
+
+static int lpm_dying_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	update_debug_pc_event(CPU_HP_DYING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+static int lpm_starting_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	update_debug_pc_event(CPU_HP_STARTING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+static const struct platform_suspend_ops lpm_suspend_ops = {
+	.enter = lpm_suspend_enter,
+	.valid = suspend_valid_only_mem,
+	.prepare_late = lpm_suspend_prepare,
+	.wake = lpm_suspend_wake,
+};
+
+static int lpm_probe(struct platform_device *pdev)
+{
+	int ret;
+	int size;
+	struct kobject *module_kobj = NULL;
+	struct md_region md_entry;
+
+	get_online_cpus();
+	lpm_root_node = lpm_of_parse_cluster(pdev);
+
+	if (IS_ERR_OR_NULL(lpm_root_node)) {
+		pr_err("%s(): Failed to probe low power modes\n", __func__);
+		put_online_cpus();
+		return PTR_ERR(lpm_root_node);
+	}
+
+	if (print_parsed_dt)
+		cluster_dt_walkthrough(lpm_root_node);
+
+	/*
+	 * Register the hotplug notifier before broadcast timer setup to
+	 * prevent a race where a broadcast timer might not be set up for a
+	 * core. This is a latent bug in the existing code, but no issues have
+	 * been observed, possibly because of how late lpm_levels gets
+	 * initialized.
+	 */
+	suspend_set_ops(&lpm_suspend_ops);
+	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
+	if (ret) {
+		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+			__func__, ret);
+		put_online_cpus();
+		return ret;
+	}
+	size = num_dbg_elements * sizeof(struct lpm_debug);
+	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
+			&lpm_debug_phys, GFP_KERNEL);
+	register_cluster_lpm_stats(lpm_root_node, NULL);
+
+	ret = cluster_cpuidle_register(lpm_root_node);
+	put_online_cpus();
+	if (ret) {
+		pr_err("%s()Failed to register with cpuidle framework\n",
+				__func__);
+		goto failed;
+	}
+	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+			"AP_QCOM_SLEEP_STARTING",
+			lpm_starting_cpu, lpm_dying_cpu);
+	if (ret)
+		goto failed;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("%s: cannot find kobject for module %s\n",
+			__func__, KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto failed;
+	}
+
+	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
+	if (ret) {
+		pr_err("%s(): Failed to create cluster level nodes\n",
+				__func__);
+		goto failed;
+	}
+
+	/* Add lpm_debug to Minidump */
+	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)lpm_debug;
+	md_entry.phys_addr = lpm_debug_phys;
+	md_entry.size = size;
+	if (msm_minidump_add_region(&md_entry))
+		pr_info("Failed to add lpm_debug in Minidump\n");
+
+	return 0;
+failed:
+	free_cluster_node(lpm_root_node);
+	lpm_root_node = NULL;
+	return ret;
+}
+
+static const struct of_device_id lpm_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-levels"},
+	{},
+};
+
+static struct platform_driver lpm_driver = {
+	.probe = lpm_probe,
+	.driver = {
+		.name = "lpm-levels",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_mtch_tbl,
+	},
+};
+
+static int __init lpm_levels_module_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&lpm_driver);
+	if (rc) {
+		pr_info("Error registering %s\n", lpm_driver.driver.name);
+		goto fail;
+	}
+
+fail:
+	return rc;
+}
+late_initcall(lpm_levels_module_init);
+
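+/*
+ * Called before power collapse to report to msm-pm/TZ whether L2 can be
+ * lowered: the cluster's tz_flag is returned only when every child CPU is
+ * in sync, and the SCM handoff lock is taken before the cluster lock is
+ * dropped.
+ */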
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
+
+	/*
+	 * No need to acquire the lock if the probe hasn't completed yet.
+	 * If hotplug happens before the lpm probe, request that L2 be
+	 * flushed, since skipping the flush could cause incoherencies on a
+	 * clustered architecture. This doesn't affect the idle case, as the
+	 * idle driver isn't registered before the probe function runs.
+	 */
+	if (!cluster)
+		return MSM_SCM_L2_OFF;
+
+	/*
+	 * Assumes L2 only. What parameters get passed into TZ, and how,
+	 * determine how this function reports this info back in msm-pm.c.
+	 */
+	spin_lock(&cluster->sync_lock);
+
+	if (!cluster->lpm_dev) {
+		retflag = MSM_SCM_L2_OFF;
+		goto unlock_and_return;
+	}
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+						&cluster->child_cpus))
+		goto unlock_and_return;
+
+	if (cluster->lpm_dev)
+		retflag = cluster->lpm_dev->tz_flag;
+	/*
+	 * The scm_handoff_lock will be released by the secure monitor.
+	 * It is used to serialize power-collapses from this point on,
+	 * so that both Linux and the secure context have a consistent
+	 * view regarding the number of running cpus (cpu_count).
+	 *
+	 * It must be acquired before releasing the cluster lock.
+	 */
+unlock_and_return:
+	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
+			0xdeadbeef);
+	trace_pre_pc_cb(retflag);
+	remote_spin_lock_rlock_id(&scm_handoff_lock,
+				  REMOTE_SPINLOCK_TID_START + cpu);
+	spin_unlock(&cluster->sync_lock);
+	return retflag;
+}
+
+/**
+ * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
+ *
+ * @cpu: cpuid of the dying CPU
+ *
+ * Called from platform_cpu_kill() to terminate hotplug in a low power mode
+ */
+void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+	enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	int i;
+	int idx = -1;
+
+	/*
+	 * If lpm isn't probed yet, try to put the cpu into one of the
+	 * available modes.
+	 */
+	if (!cluster) {
+		if (msm_spm_is_mode_avail(
+					MSM_SPM_MODE_POWER_COLLAPSE)){
+			mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+		} else if (msm_spm_is_mode_avail(
+				MSM_SPM_MODE_FASTPC)) {
+			mode = MSM_PM_SLEEP_MODE_FASTPC;
+		} else if (msm_spm_is_mode_avail(
+				MSM_SPM_MODE_RETENTION)) {
+			mode = MSM_PM_SLEEP_MODE_RETENTION;
+		} else {
+			pr_err("No mode avail for cpu%d hotplug\n", cpu);
+			WARN_ON(1);
+			return;
+		}
+	} else {
+		struct lpm_cpu *lpm_cpu;
+		uint32_t ss_pwr = ~0U;
+
+		lpm_cpu = cluster->cpu;
+		for (i = 0; i < lpm_cpu->nlevels; i++) {
+			if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power)
+				continue;
+			ss_pwr = lpm_cpu->levels[i].pwr.ss_power;
+			idx = i;
+			mode = lpm_cpu->levels[i].mode;
+		}
+
+		if (mode == MSM_PM_SLEEP_MODE_NR)
+			return;
+
+		WARN_ON(idx < 0);
+		cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0);
+	}
+
+	msm_cpu_pm_enter_sleep(mode, false);
+}
diff --git a/drivers/cpuidle/lpm-levels-legacy.h b/drivers/cpuidle/lpm-levels-legacy.h
new file mode 100644
index 0000000..4a07355
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/spm.h>
+
+#define NR_LPM_LEVELS 8
+
+extern bool use_psci;
+
+struct lpm_lookup_table {
+	uint32_t modes;
+	const char *mode_name;
+};
+
+struct power_params {
+	uint32_t latency_us;		/* Enter + Exit latency */
+	uint32_t ss_power;		/* Steady state power */
+	uint32_t energy_overhead;	/* Enter + exit overhead */
+	uint32_t time_overhead_us;	/* Enter + exit overhead */
+	uint32_t residencies[NR_LPM_LEVELS];
+	uint32_t max_residency;
+};
+
+struct lpm_cpu_level {
+	const char *name;
+	enum msm_pm_sleep_mode mode;
+	bool use_bc_timer;
+	struct power_params pwr;
+	unsigned int psci_id;
+	bool is_reset;
+	bool jtag_save_restore;
+	bool hyp_psci;
+	int reset_level;
+};
+
+struct lpm_cpu {
+	struct lpm_cpu_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	struct lpm_cluster *parent;
+};
+
+struct lpm_level_avail {
+	bool idle_enabled;
+	bool suspend_enabled;
+	struct kobject *kobj;
+	struct kobj_attribute idle_enabled_attr;
+	struct kobj_attribute suspend_enabled_attr;
+	void *data;
+	int idx;
+	bool cpu_node;
+};
+
+struct lpm_cluster_level {
+	const char *level_name;
+	int *mode;			/* SPM mode to enter */
+	int min_child_level;
+	struct cpumask num_cpu_votes;
+	struct power_params pwr;
+	bool notify_rpm;
+	bool disable_dynamic_routing;
+	bool sync_level;
+	bool last_core_only;
+	struct lpm_level_avail available;
+	unsigned int psci_id;
+	bool is_reset;
+	int reset_level;
+	bool no_cache_flush;
+};
+
+struct low_power_ops {
+	struct msm_spm_device *spm;
+	int (*set_mode)(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+	enum msm_pm_l2_scm_flag tz_flag;
+};
+
+struct lpm_cluster {
+	struct list_head list;
+	struct list_head child;
+	const char *cluster_name;
+	const char **name;
+	unsigned long aff_level; /* Affinity level of the node */
+	struct low_power_ops *lpm_dev;
+	int ndevices;
+	struct lpm_cluster_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	enum msm_pm_l2_scm_flag l2_flag;
+	int min_child_level;
+	int default_level;
+	int last_level;
+	struct lpm_cpu *cpu;
+	struct cpuidle_driver *drv;
+	spinlock_t sync_lock;
+	struct cpumask child_cpus;
+	struct cpumask num_children_in_sync;
+	struct lpm_cluster *parent;
+	struct lpm_stats *stats;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	bool no_saw_devices;
+};
+
+int set_l2_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+int set_system_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+int set_l3_mode(struct low_power_ops *ops, int mode,
+				struct lpm_cluster_level *level);
+void lpm_suspend_wake_time(uint64_t wakeup_time);
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
+void free_cluster_node(struct lpm_cluster *cluster);
+void cluster_dt_walkthrough(struct lpm_cluster *cluster);
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int mode, bool from_idle);
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle);
+uint32_t *get_per_cpu_max_residency(int cpu);
+extern struct lpm_cluster *lpm_root_node;
+
+#ifdef CONFIG_SMP
+extern DEFINE_PER_CPU(bool, pending_ipi);
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (per_cpu(pending_ipi, cpu))
+			return true;
+	}
+	return false;
+}
+#else
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	return false;
+}
+#endif
diff --git a/drivers/cpuidle/lpm-levels-of-legacy.c b/drivers/cpuidle/lpm-levels-of-legacy.c
new file mode 100644
index 0000000..bf74124
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-of-legacy.c
@@ -0,0 +1,1006 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include "lpm-levels-legacy.h"
+
+bool use_psci;
+enum lpm_type {
+	IDLE = 0,
+	SUSPEND,
+	LPM_TYPE_NR
+};
+
+struct lpm_type_str {
+	enum lpm_type type;
+	char *str;
+};
+
+static const struct lpm_type_str lpm_types[] = {
+	{IDLE, "idle_enabled"},
+	{SUSPEND, "suspend_enabled"},
+};
+
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static struct lpm_level_avail *cpu_level_available[NR_CPUS];
+static struct platform_device *lpm_pdev;
+
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+					struct lpm_level_avail *avail)
+{
+	void *arg = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		arg = (void *) &avail->idle_enabled;
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		arg = (void *) &avail->suspend_enabled;
+
+	return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+					struct kobj_attribute *attr)
+{
+	struct lpm_level_avail *avail = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					idle_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					suspend_enabled_attr);
+
+	return avail;
+}
+
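+/*
+ * For every enabled CPU level i, cache the smallest non-zero residency of
+ * any deeper enabled level in the per-cpu max_residency[i]; disabled
+ * levels get 0.
+ */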
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+	uint32_t *residency = per_cpu(max_residency, cpu_id);
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct power_params *pwr = &cpu->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cpu_mode_allow(cpu_id, i, true);
+
+		if (!mode_avail) {
+			residency[i] = 0;
+			continue;
+		}
+
+		residency[i] = ~0;
+		for (j = i + 1; j < cpu->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cpu_mode_allow(cpu_id, j, true);
+
+			if (mode_avail &&
+				(residency[i] > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				residency[i] = pwr->residencies[j];
+		}
+	}
+}
+
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct power_params *pwr = &cluster->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cluster_mode_allow(cluster, i,
+					true);
+
+		if (!mode_avail) {
+			pwr->max_residency = 0;
+			continue;
+		}
+
+		pwr->max_residency = ~0;
+		for (j = i+1; j < cluster->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cluster_mode_allow(cluster, j,
+							true);
+			if (mode_avail &&
+				(pwr->max_residency > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				pwr->max_residency = pwr->residencies[j];
+		}
+	}
+}
+
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+	return per_cpu(max_residency, cpu);
+}
+
+static ssize_t lpm_enable_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+
+	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
+	ret = param_get_bool(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+static ssize_t lpm_enable_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t len)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail;
+
+	avail = get_avail_ptr(kobj, attr);
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = get_enabled_ptr(attr, avail);
+	ret = param_set_bool(buf, &kp);
+
+	if (avail->cpu_node)
+		set_optimum_cpu_residency(avail->data, avail->idx, false);
+	else
+		set_optimum_cluster_residency(avail->data, false);
+
+	return ret ? ret : len;
+}
+
+static int create_lvl_avail_nodes(const char *name,
+			struct kobject *parent, struct lpm_level_avail *avail,
+			void *data, int index, bool cpu_node)
+{
+	struct attribute_group *attr_group = NULL;
+	struct attribute **attr = NULL;
+	struct kobject *kobj = NULL;
+	int ret = 0;
+
+	kobj = kobject_create_and_add(name, parent);
+	if (!kobj)
+		return -ENOMEM;
+
+	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
+					GFP_KERNEL);
+	if (!attr_group) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	attr = devm_kzalloc(&lpm_pdev->dev,
+		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	sysfs_attr_init(&avail->idle_enabled_attr.attr);
+	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
+	avail->idle_enabled_attr.attr.mode = 0644;
+	avail->idle_enabled_attr.show = lpm_enable_show;
+	avail->idle_enabled_attr.store = lpm_enable_store;
+
+	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
+	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
+	avail->suspend_enabled_attr.attr.mode = 0644;
+	avail->suspend_enabled_attr.show = lpm_enable_show;
+	avail->suspend_enabled_attr.store = lpm_enable_store;
+
+	attr[0] = &avail->idle_enabled_attr.attr;
+	attr[1] = &avail->suspend_enabled_attr.attr;
+	attr[2] = NULL;
+	attr_group->attrs = attr;
+
+	ret = sysfs_create_group(kobj, attr_group);
+	if (ret) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	avail->idle_enabled = true;
+	avail->suspend_enabled = true;
+	avail->kobj = kobj;
+	avail->data = data;
+	avail->idx = index;
+	avail->cpu_node = cpu_node;
+
+	return ret;
+
+failed:
+	kobject_put(kobj);
+	return ret;
+}
+
+static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
+{
+	int cpu;
+	int i, cpu_idx;
+	struct kobject **cpu_kobj = NULL;
+	struct lpm_level_avail *level_list = NULL;
+	char cpu_name[20] = {0};
+	int ret = 0;
+
+	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
+			cpumask_weight(&p->child_cpus), GFP_KERNEL);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	cpu_idx = 0;
+	for_each_cpu(cpu, &p->child_cpus) {
+		snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+		cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
+		if (!cpu_kobj[cpu_idx]) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		level_list = devm_kzalloc(&lpm_pdev->dev,
+				p->cpu->nlevels * sizeof(*level_list),
+				GFP_KERNEL);
+		if (!level_list) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		for (i = 0; i < p->cpu->nlevels; i++) {
+
+			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
+					cpu_kobj[cpu_idx], &level_list[i],
+					(void *)p->cpu, cpu, true);
+			if (ret)
+				goto release_kobj;
+		}
+
+		cpu_level_available[cpu] = level_list;
+		cpu_idx++;
+	}
+
+	return ret;
+
+release_kobj:
+	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
+		kobject_put(cpu_kobj[i]);
+
+	return ret;
+}
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
+{
+	int ret = 0;
+	struct lpm_cluster *child = NULL;
+	int i;
+	struct kobject *cluster_kobj = NULL;
+
+	if (!p)
+		return -ENODEV;
+
+	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
+	if (!cluster_kobj)
+		return -ENOMEM;
+
+	for (i = 0; i < p->nlevels; i++) {
+		ret = create_lvl_avail_nodes(p->levels[i].level_name,
+				cluster_kobj, &p->levels[i].available,
+				(void *)p, 0, false);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(child, &p->child, list) {
+		ret = create_cluster_lvl_nodes(child, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	if (p->cpu) {
+		ret = create_cpu_lvl_nodes(p, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int index, bool from_idle)
+{
+	struct lpm_level_avail *avail = cpu_level_available[cpu];
+
+	if (!lpm_pdev || !avail)
+		return !from_idle;
+
+	return !!(from_idle ? avail[index].idle_enabled :
+				avail[index].suspend_enabled);
+}
+
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle)
+{
+	struct lpm_level_avail *avail = &cluster->levels[mode].available;
+
+	if (!lpm_pdev || !avail)
+		return false;
+
+	return !!(from_idle ? avail->idle_enabled :
+				avail->suspend_enabled);
+}
+
+static int parse_legacy_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	int i;
+	char *key;
+	int ret;
+	struct lpm_match {
+		char *devname;
+		int (*set_mode)(struct low_power_ops *, int,
+					struct lpm_cluster_level *);
+	};
+	struct lpm_match match_tbl[] = {
+		{"l2", set_l2_mode},
+		{"cci", set_system_mode},
+		{"l3", set_l3_mode},
+		{"cbf", set_system_mode},
+	};
+
+
+	key = "qcom,spm-device-names";
+	c->ndevices = of_property_count_strings(node, key);
+
+	if (c->ndevices < 0) {
+		pr_info("%s(): Ignoring cluster params\n", __func__);
+		c->no_saw_devices = true;
+		c->ndevices = 0;
+		return 0;
+	}
+
+	c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
+				GFP_KERNEL);
+	c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
+				c->ndevices * sizeof(*c->lpm_dev),
+				GFP_KERNEL);
+	if (!c->name || !c->lpm_dev) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	for (i = 0; i < c->ndevices; i++) {
+		char device_name[20];
+		int j;
+
+		ret = of_property_read_string_index(node, key, i, &c->name[i]);
+		if (ret)
+			goto failed;
+		snprintf(device_name, sizeof(device_name), "%s-%s",
+				c->cluster_name, c->name[i]);
+
+		c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);
+
+		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
+			pr_err("Failed to get spm device by name:%s\n",
+					device_name);
+			ret = PTR_ERR(c->lpm_dev[i].spm);
+			goto failed;
+		}
+		for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
+			if (!strcmp(c->name[i], match_tbl[j].devname))
+				c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
+		}
+
+		if (!c->lpm_dev[i].set_mode) {
+			ret = -ENODEV;
+			goto failed;
+		}
+	}
+
+	key = "qcom,default-level";
+	if (of_property_read_u32(node, key, &c->default_level))
+		c->default_level = 0;
+	return 0;
+failed:
+	pr_err("%s(): Failed reading %s\n", __func__, key);
+	return ret;
+}
+
+static int parse_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	char *key;
+	int ret;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &c->cluster_name);
+	if (ret) {
+		pr_err("%s(): Cannot read required param %s\n", __func__, key);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-mode-shift";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_shift);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		key = "qcom,psci-mode-mask";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_mask);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		/* Set ndevices to 1 as default */
+		c->ndevices = 1;
+
+		return 0;
+	} else
+		return parse_legacy_cluster_params(node, c);
+}
+
+static int parse_lpm_mode(const char *str)
+{
+	int i;
+	struct lpm_lookup_table mode_lookup[] = {
+		{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
+		{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
+		{MSM_SPM_MODE_FASTPC, "fpc"},
+		{MSM_SPM_MODE_GDHS, "gdhs"},
+		{MSM_SPM_MODE_RETENTION, "retention"},
+		{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
+		{MSM_SPM_MODE_DISABLED, "active"}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
+		if (!strcmp(str, mode_lookup[i].mode_name))
+			return  mode_lookup[i].modes;
+	return -EINVAL;
+}
+
+static int parse_power_params(struct device_node *node,
+		struct power_params *pwr)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,latency-us";
+	ret  = of_property_read_u32(node, key, &pwr->latency_us);
+	if (ret)
+		goto fail;
+
+	key = "qcom,ss-power";
+	ret = of_property_read_u32(node, key, &pwr->ss_power);
+	if (ret)
+		goto fail;
+
+	key = "qcom,energy-overhead";
+	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+	if (ret)
+		goto fail;
+
+	key = "qcom,time-overhead";
+	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+	if (ret)
+		goto fail;
+
+fail:
+	if (ret)
+		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
+				key);
+	return ret;
+}
+
+static int parse_cluster_level(struct device_node *node,
+		struct lpm_cluster *cluster)
+{
+	int i = 0;
+	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
+	int ret = -ENOMEM;
+	char *key;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	if (use_psci) {
+		char *k = "qcom,psci-mode";
+
+		ret = of_property_read_u32(node, k, &level->psci_id);
+		if (ret)
+			goto failed;
+
+		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+	} else if (!cluster->no_saw_devices) {
+		key  = "no saw-devices";
+
+		level->mode = devm_kzalloc(&lpm_pdev->dev,
+				cluster->ndevices * sizeof(*level->mode),
+				GFP_KERNEL);
+		if (!level->mode) {
+			pr_err("Memory allocation failed\n");
+			goto failed;
+		}
+
+		for (i = 0; i < cluster->ndevices; i++) {
+			const char *spm_mode;
+			char key[25] = {0};
+
+			snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
+			ret = of_property_read_string(node, key, &spm_mode);
+			if (ret)
+				goto failed;
+
+			level->mode[i] = parse_lpm_mode(spm_mode);
+
+			if (level->mode[i] < 0)
+				goto failed;
+
+			if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
+				|| level->mode[i] ==
+				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
+				level->is_reset |= true;
+		}
+	}
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	if (cluster->nlevels != cluster->default_level) {
+		key = "min child idx";
+		ret = of_property_read_u32(node, "qcom,min-child-idx",
+				&level->min_child_level);
+		if (ret)
+			goto failed;
+
+		if (cluster->min_child_level > level->min_child_level)
+			cluster->min_child_level = level->min_child_level;
+	}
+
+	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
+	level->disable_dynamic_routing = of_property_read_bool(node,
+					"qcom,disable-dynamic-int-routing");
+	level->last_core_only = of_property_read_bool(node,
+					"qcom,last-core-only");
+	level->no_cache_flush = of_property_read_bool(node,
+					"qcom,no-cache-flush");
+
+	key = "parse_power_params";
+	ret = parse_power_params(node, &level->pwr);
+	if (ret)
+		goto failed;
+
+	key = "qcom,reset-level";
+	ret = of_property_read_u32(node, key, &level->reset_level);
+	if (ret == -EINVAL)
+		level->reset_level = LPM_RESET_LVL_NONE;
+	else if (ret)
+		goto failed;
+
+	cluster->nlevels++;
+	return 0;
+failed:
+	pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
+	return ret;
+}
+
+static int parse_cpu_spm_mode(const char *mode_name)
+{
+	struct lpm_lookup_table pm_sm_lookup[] = {
+		{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+			"wfi"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+			"standalone_pc"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+			"pc"},
+		{MSM_PM_SLEEP_MODE_RETENTION,
+			"retention"},
+		{MSM_PM_SLEEP_MODE_FASTPC,
+			"fpc"},
+	};
+	int i;
+	int ret = -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
+		if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
+			ret = pm_sm_lookup[i].modes;
+			break;
+		}
+	}
+	return ret;
+}
+
+static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,spm-cpu-mode";
+	ret  =  of_property_read_string(n, key, &l->name);
+	if (ret) {
+		pr_err("Failed %s %d\n", n->name, __LINE__);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-cpu-mode";
+
+		ret = of_property_read_u32(n, key, &l->psci_id);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					n->name);
+			return ret;
+		}
+		key = "qcom,hyp-psci";
+
+		l->hyp_psci = of_property_read_bool(n, key);
+	} else {
+		l->mode = parse_cpu_spm_mode(l->name);
+
+		if (l->mode < 0)
+			return l->mode;
+	}
+	return 0;
+
+}
+
+static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
+{
+	struct device_node *cpu_node;
+	int cpu;
+	int idx = 0;
+
+	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	if (!cpu_node) {
+		pr_info("%s: No CPU phandle, assuming single cluster\n",
+				node->full_name);
+		/*
+		 * Not all targets have the cpu node populated in the device
+		 * tree. If the cpu node is not populated, assume all possible
+		 * CPUs belong to this cluster.
+		 */
+		cpumask_copy(mask, cpu_possible_mask);
+		return 0;
+	}
+
+	while (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpumask_set_cpu(cpu, mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	}
+
+	return 0;
+}
+
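+/*
+ * Break-even residency between two levels, derived from the power params:
+ *
+ *   residency = ((E_next - E_base) - (P_next * T_next - P_base * T_base))
+ *               / (P_base - P_next)
+ *
+ * where E is energy-overhead, P is steady-state power and T is
+ * time-overhead. The result is clamped to at least next's time overhead.
+ */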
+static int calculate_residency(struct power_params *base_pwr,
+					struct power_params *next_pwr)
+{
+	int32_t residency = (int32_t)(next_pwr->energy_overhead -
+						base_pwr->energy_overhead) -
+		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+	residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
+
+	if (residency < 0) {
+		pr_err("%s: residency < 0 for LPM\n",
+				__func__);
+		return next_pwr->time_overhead_us;
+	}
+
+	return residency < next_pwr->time_overhead_us ?
+				next_pwr->time_overhead_us : residency;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+	struct device_node *n;
+	int ret = -ENOMEM;
+	int i, j;
+	char *key;
+
+	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
+	if (!c->cpu)
+		return ret;
+
+	c->cpu->parent = c;
+	if (use_psci) {
+
+		key = "qcom,psci-mode-shift";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+		key = "qcom,psci-mode-mask";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+	}
+	for_each_child_of_node(node, n) {
+		struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+
+		c->cpu->nlevels++;
+
+		ret = parse_cpu_mode(n, l);
+		if (ret < 0) {
+			pr_info("Failed %s\n", l->name);
+			goto failed;
+		}
+
+		ret = parse_power_params(n, &l->pwr);
+		if (ret)
+			goto failed;
+
+		key = "qcom,use-broadcast-timer";
+		l->use_bc_timer = of_property_read_bool(n, key);
+
+		l->is_reset = of_property_read_bool(n, "qcom,is-reset");
+
+		key = "qcom,jtag-save-restore";
+		l->jtag_save_restore = of_property_read_bool(n, key);
+
+		key = "qcom,reset-level";
+		ret = of_property_read_u32(n, key, &l->reset_level);
+		if (ret == -EINVAL)
+			l->reset_level = LPM_RESET_LVL_NONE;
+		else if (ret)
+			goto failed;
+		of_node_put(n);
+	}
+	for (i = 0; i < c->cpu->nlevels; i++) {
+		for (j = 0; j < c->cpu->nlevels; j++) {
+			if (i >= j) {
+				c->cpu->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+
+			c->cpu->levels[i].pwr.residencies[j] =
+				calculate_residency(&c->cpu->levels[i].pwr,
+					&c->cpu->levels[j].pwr);
+
+			pr_err("%s: idx %d %u\n", __func__, j,
+					c->cpu->levels[i].pwr.residencies[j]);
+		}
+	}
+
+	return 0;
+failed:
+	of_node_put(n);
+	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
+	return ret;
+}
+
+void free_cluster_node(struct lpm_cluster *cluster)
+{
+	struct lpm_cluster *cl, *m;
+
+	list_for_each_entry_safe(cl, m, &cluster->child, list) {
+		list_del(&cl->list);
+		free_cluster_node(cl);
+	}
+
+	cluster->ndevices = 0;
+}
+
+/*
+ * TODO:
+ * Expects a CPU or a cluster node only. This ensures that the affinity
+ * level of a cluster is consistent with respect to its child nodes.
+ */
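+/*
+ * Illustrative sketch of the devicetree layout this parser expects (the
+ * node names and values below are hypothetical examples, not taken from
+ * any particular target):
+ *
+ *	qcom,pm-cluster {
+ *		label = "system";
+ *		qcom,psci-mode-shift = <4>;
+ *		qcom,psci-mode-mask = <0xf>;
+ *
+ *		qcom,pm-cluster-level {
+ *			label = "system-ret";
+ *			qcom,psci-mode = <1>;
+ *			qcom,latency-us = <100>;
+ *			qcom,ss-power = <400>;
+ *			qcom,energy-overhead = <100000>;
+ *			qcom,time-overhead = <200>;
+ *		};
+ *
+ *		qcom,pm-cpu {
+ *			qcom,psci-mode-shift = <0>;
+ *			qcom,psci-mode-mask = <0xf>;
+ *
+ *			qcom,pm-cpu-level {
+ *				qcom,spm-cpu-mode = "wfi";
+ *				qcom,psci-cpu-mode = <1>;
+ *				qcom,latency-us = <1>;
+ *				qcom,ss-power = <450>;
+ *				qcom,energy-overhead = <4000>;
+ *				qcom,time-overhead = <10>;
+ *			};
+ *		};
+ *	};
+ *
+ * In legacy (non-PSCI) mode the cluster instead carries
+ * qcom,spm-device-names and each level a qcom,spm-<device>-mode string.
+ */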
+static struct lpm_cluster *parse_cluster(struct device_node *node,
+		struct lpm_cluster *parent)
+{
+	struct lpm_cluster *c;
+	struct device_node *n;
+	char *key;
+	int ret = 0;
+	int i, j;
+
+	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	ret = parse_cluster_params(node, c);
+
+	if (ret)
+		goto failed_parse_params;
+
+	INIT_LIST_HEAD(&c->child);
+	c->parent = parent;
+	spin_lock_init(&c->sync_lock);
+	c->min_child_level = NR_LPM_LEVELS;
+
+	for_each_child_of_node(node, n) {
+
+		if (!n->name)
+			continue;
+		key = "qcom,pm-cluster-level";
+		if (!of_node_cmp(n->name, key)) {
+			if (parse_cluster_level(n, c))
+				goto failed_parse_cluster;
+			continue;
+		}
+
+		key = "qcom,pm-cluster";
+		if (!of_node_cmp(n->name, key)) {
+			struct lpm_cluster *child;
+
+			if (c->no_saw_devices)
+				pr_info("%s: SAW device not provided.\n",
+					__func__);
+
+			child = parse_cluster(n, c);
+			if (!child)
+				goto failed_parse_cluster;
+
+			of_node_put(n);
+			list_add(&child->list, &c->child);
+			cpumask_or(&c->child_cpus, &c->child_cpus,
+					&child->child_cpus);
+			c->aff_level = child->aff_level + 1;
+			continue;
+		}
+
+		key = "qcom,pm-cpu";
+		if (!of_node_cmp(n->name, key)) {
+			/*
+			 * Parse the cpu node only if a pm-cpu node is
+			 * available, though the mask is defined at the
+			 * cluster level.
+			 */
+			if (get_cpumask_for_node(node, &c->child_cpus))
+				goto failed_parse_cluster;
+
+			if (parse_cpu_levels(n, c))
+				goto failed_parse_cluster;
+
+			c->aff_level = 1;
+
+			for_each_cpu(i, &c->child_cpus) {
+				per_cpu(max_residency, i) = devm_kzalloc(
+					&lpm_pdev->dev,
+					sizeof(uint32_t) * c->cpu->nlevels,
+					GFP_KERNEL);
+				if (!per_cpu(max_residency, i))
+					return ERR_PTR(-ENOMEM);
+				set_optimum_cpu_residency(c->cpu, i, true);
+			}
+		}
+	}
+
+	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
+		c->last_level = c->default_level;
+	else
+		c->last_level = c->nlevels-1;
+
+	for (i = 0; i < c->nlevels; i++) {
+		for (j = 0; j < c->nlevels; j++) {
+			if (i >= j) {
+				c->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+			c->levels[i].pwr.residencies[j] = calculate_residency(
+				&c->levels[i].pwr, &c->levels[j].pwr);
+		}
+	}
+	set_optimum_cluster_residency(c, true);
+	return c;
+
+failed_parse_cluster:
+	pr_err("Failed parse cluster:%s\n", key);
+	if (parent)
+		list_del(&c->list);
+	free_cluster_node(c);
+failed_parse_params:
+	pr_err("Failed parse params\n");
+	return NULL;
+}
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
+{
+	struct device_node *top = NULL;
+	struct lpm_cluster *c;
+
+	use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
+
+	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
+	if (!top) {
+		pr_err("Failed to find root node\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	lpm_pdev = pdev;
+	c = parse_cluster(top, NULL);
+	of_node_put(top);
+	return c;
+}
+
+void cluster_dt_walkthrough(struct lpm_cluster *cluster)
+{
+	struct list_head *list;
+	int i, j;
+	static int id;
+	char str[10] = {0};
+
+	if (!cluster)
+		return;
+
+	for (i = 0; i < id; i++)
+		snprintf(str+i, 10 - i, "\t");
+	pr_info("%d\n", __LINE__);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *l = &cluster->levels[i];
+
+		pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
+		for (j = 0; j < cluster->ndevices; j++)
+			pr_info("%sDevice: %pK id:%pK\n", str,
+					&cluster->name[j], &l->mode[i]);
+	}
+
+	if (cluster->cpu) {
+		pr_info("%d\n", __LINE__);
+		for (j = 0; j < cluster->cpu->nlevels; j++)
+			pr_info("%s\tCPU mode: %s id:%d\n", str,
+					cluster->cpu->levels[j].name,
+					cluster->cpu->levels[j].mode);
+	}
+
+	id++;
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		pr_info("%d\n", __LINE__);
+		n = list_entry(list, typeof(*n), list);
+		cluster_dt_walkthrough(n);
+	}
+	id--;
+}
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index 7a653c6..1d1d7e7 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -614,6 +614,7 @@
 				break;
 			}
 		}
+		of_node_put(cpu_node);
 		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
 	}
 
@@ -651,13 +652,16 @@
 		cpu->nlevels++;
 
 		ret = parse_cpu_mode(n, l);
-		if (ret)
+		if (ret) {
+			of_node_put(n);
 			return ret;
+		}
 
 		ret = parse_power_params(n, &l->pwr);
-		if (ret)
+		if (ret) {
+			of_node_put(n);
 			return ret;
-
+		}
 		key = "qcom,use-broadcast-timer";
 		l->use_bc_timer = of_property_read_bool(n, key);
 
@@ -670,6 +674,7 @@
 			l->reset_level = LPM_RESET_LVL_NONE;
 		else if (ret)
 			return ret;
+		of_node_put(n);
 	}
 
 	for (i = 0; i < cpu->nlevels; i++) {
@@ -820,8 +825,11 @@
 
 		key = "qcom,pm-cluster-level";
 		if (!of_node_cmp(n->name, key)) {
-			if (parse_cluster_level(n, c))
+			if (parse_cluster_level(n, c)) {
+				of_node_put(n);
 				goto failed_parse_cluster;
+			}
+			of_node_put(n);
 			continue;
 		}
 
@@ -830,22 +838,28 @@
 			struct lpm_cluster *child;
 
 			child = parse_cluster(n, c);
-			if (!child)
+			if (!child) {
+				of_node_put(n);
 				goto failed_parse_cluster;
+			}
 
 			list_add(&child->list, &c->child);
 			cpumask_or(&c->child_cpus, &c->child_cpus,
 					&child->child_cpus);
 			c->aff_level = child->aff_level + 1;
+			of_node_put(n);
 			continue;
 		}
 
 		key = "qcom,pm-cpu";
 		if (!of_node_cmp(n->name, key)) {
-			if (parse_cpu_levels(n, c))
+			if (parse_cpu_levels(n, c)) {
+				of_node_put(n);
 				goto failed_parse_cluster;
+			}
 
 			c->aff_level = 1;
+			of_node_put(n);
 		}
 	}
 
@@ -879,6 +893,7 @@
 struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
 {
 	struct device_node *top = NULL;
+	struct lpm_cluster *c;
 
 	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
 	if (!top) {
@@ -887,7 +902,9 @@
 	}
 
 	lpm_pdev = pdev;
-	return parse_cluster(top, NULL);
+	c = parse_cluster(top, NULL);
+	of_node_put(top);
+	return c;
 }
 
 void cluster_dt_walkthrough(struct lpm_cluster *cluster)
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 9694225..c7ec868 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -159,7 +159,7 @@
 	uint32_t latency = 0;
 	int i;
 
-	if (!cluster->list.next) {
+	if (list_empty(&cluster->list)) {
 		for (i = 0; i < cluster->nlevels; i++) {
 			level = &cluster->levels[i];
 			pwr_params = &level->pwr;
@@ -341,6 +341,11 @@
 {
 	unsigned int cpu = raw_smp_processor_id();
 	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
+	ktime_t time_rem;
+
+	time_rem = hrtimer_get_remaining(cpu_histtimer);
+	if (ktime_to_us(time_rem) <= 0)
+		return;
 
 	hrtimer_try_to_cancel(cpu_histtimer);
 }
@@ -386,11 +391,21 @@
 {
 	int cpu = raw_smp_processor_id();
 	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+	ktime_t time_rem;
 
-	hrtimer_try_to_cancel(&cluster->histtimer);
+	time_rem = hrtimer_get_remaining(&cluster->histtimer);
+	if (ktime_to_us(time_rem) > 0)
+		hrtimer_try_to_cancel(&cluster->histtimer);
 
-	if (cluster->parent)
+	if (cluster->parent) {
+		time_rem = hrtimer_get_remaining(
+			&cluster->parent->histtimer);
+
+		if (ktime_to_us(time_rem) <= 0)
+			return;
+
 		hrtimer_try_to_cancel(&cluster->parent->histtimer);
+	}
 }
 
 static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
@@ -1394,11 +1409,11 @@
 	dev->last_residency = ktime_us_delta(ktime_get(), start);
 	update_history(dev, idx);
 	trace_cpu_idle_exit(idx, success);
-	local_irq_enable();
 	if (lpm_prediction && cpu->lpm_prediction) {
 		histtimer_cancel();
 		clusttimer_cancel();
 	}
+	local_irq_enable();
 	return idx;
 }
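The lpm-levels.c hunks above apply one recurring guard: hrtimer_try_to_cancel() is only called when hrtimer_get_remaining() still reports pending time, and local_irq_enable() is moved until after the history and cluster timers have been cancelled. A minimal sketch of that guard, with a hypothetical helper name that is not part of the patch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Illustrative only: skip the cancel when the timer has already fired or was
 * never armed, mirroring the histtimer/clusttimer changes above.
 */
static void cancel_if_pending(struct hrtimer *timer)
{
	ktime_t time_rem = hrtimer_get_remaining(timer);

	if (ktime_to_us(time_rem) <= 0)
		return;

	hrtimer_try_to_cancel(timer);
}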
 
diff --git a/drivers/cpuidle/lpm-workarounds.c b/drivers/cpuidle/lpm-workarounds.c
new file mode 100644
index 0000000..657e2b9
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <lpm-workarounds.h>
+
+static struct regulator *lpm_cx_reg;
+static struct work_struct dummy_vote_work;
+static struct workqueue_struct *lpm_wa_wq;
+static bool lpm_wa_cx_turbo_unvote;
+static bool skip_l2_spm;
+
+/* While exiting from RPM-assisted power collapse on some targets like MSM8939,
+ * the CX is bumped to turbo mode by RPM. To reduce the power impact, the
+ * APSS low power driver needs to remove the CX turbo vote.
+ */
+static void send_dummy_cx_vote(struct work_struct *w)
+{
+	if (lpm_cx_reg) {
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_SUPER_TURBO,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_NONE,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+	}
+}
+
+/*
+ * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
+ */
+void lpm_wa_cx_unvote_send(void)
+{
+	if (lpm_wa_cx_turbo_unvote)
+		queue_work(lpm_wa_wq, &dummy_vote_work);
+}
+EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
+
+static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
+	if (IS_ERR(lpm_cx_reg)) {
+		ret = PTR_ERR(lpm_cx_reg);
+		if (ret != -EPROBE_DEFER)
+			pr_err("Unable to get the CX regulator\n");
+		return ret;
+	}
+
+	INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
+
+	lpm_wa_wq = alloc_workqueue("lpm-wa",
+				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+
+	return ret;
+}
+
+static int lpm_wa_cx_unvote_exit(void)
+{
+	if (lpm_wa_wq)
+		destroy_workqueue(lpm_wa_wq);
+
+	return 0;
+}
+
+bool lpm_wa_get_skip_l2_spm(void)
+{
+	return skip_l2_spm;
+}
+EXPORT_SYMBOL(lpm_wa_get_skip_l2_spm);
+
+static int lpm_wa_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
+					"qcom,lpm-wa-cx-turbo-unvote");
+	if (lpm_wa_cx_turbo_unvote) {
+		ret = lpm_wa_cx_unvote_init(pdev);
+		if (ret) {
+			pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	skip_l2_spm = of_property_read_bool(pdev->dev.of_node,
+						"qcom,lpm-wa-skip-l2-spm");
+
+	return ret;
+}
+
+static int lpm_wa_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (lpm_wa_cx_turbo_unvote)
+		ret = lpm_wa_cx_unvote_exit();
+
+	return ret;
+}
+
+static const struct of_device_id lpm_wa_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-workarounds"},
+	{},
+};
+
+static struct platform_driver lpm_wa_driver = {
+	.probe = lpm_wa_probe,
+	.remove = lpm_wa_remove,
+	.driver = {
+		.name = "lpm-workarounds",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_wa_mtch_tbl,
+	},
+};
+
+static int __init lpm_wa_module_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&lpm_wa_driver);
+	if (ret)
+		pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
+
+	return ret;
+}
+late_initcall(lpm_wa_module_init);
diff --git a/drivers/cpuidle/lpm-workarounds.h b/drivers/cpuidle/lpm-workarounds.h
new file mode 100644
index 0000000..a290dcb
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LPM_WA_H
+#define __LPM_WA_H
+
+void lpm_wa_cx_unvote_send(void);
+bool lpm_wa_get_skip_l2_spm(void);
+
+#endif  /* __LPM_WA_H */
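The two helpers declared above are intended for the low power management exit path. A minimal usage sketch, assuming a hypothetical caller (the function name below is illustrative and not part of the patch):

#include "lpm-workarounds.h"

/* Hypothetical caller: apply the DT-selected workarounds when leaving
 * RPM-assisted power collapse.
 */
static void example_power_collapse_exit(void)
{
	/* Queue the dummy CX vote that drops RPM's turbo vote, if enabled */
	lpm_wa_cx_unvote_send();

	/* Skip L2 SPM programming when qcom,lpm-wa-skip-l2-spm is set */
	if (lpm_wa_get_skip_l2_spm())
		return;

	/* ... program the L2 SPM here ... */
}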
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 3aa75aa..199d573 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -77,6 +77,8 @@
 #define QCOM_ICE_ENCRYPT	0x1
 #define QCOM_ICE_DECRYPT	0x2
 #define QCOM_SECT_LEN_IN_BYTE	512
+#define QCOM_UD_FOOTER_SIZE	0x4000
+#define QCOM_UD_FOOTER_SECS	(QCOM_UD_FOOTER_SIZE / QCOM_SECT_LEN_IN_BYTE)
 
 struct ice_clk_info {
 	struct list_head list;
@@ -127,8 +129,6 @@
 };
 
 static int ice_fde_flag;
-static unsigned long userdata_start;
-static unsigned long userdata_end;
 static struct ice_crypto_setting ice_data;
 
 static int qti_ice_setting_config(struct request *req,
@@ -152,22 +152,29 @@
 		return -EPERM;
 	}
 
+	if (!setting)
+		return -EINVAL;
+
 	if ((short)(crypto_data->key_index) >= 0) {
 
 		memcpy(&setting->crypto_data, crypto_data,
 				sizeof(setting->crypto_data));
 
-		if (rq_data_dir(req) == WRITE &&
-				(ice_fde_flag & QCOM_ICE_ENCRYPT))
-			setting->encr_bypass = false;
-		else if (rq_data_dir(req) == READ &&
-				(ice_fde_flag & QCOM_ICE_DECRYPT))
-			setting->decr_bypass = false;
-		else {
+		switch (rq_data_dir(req)) {
+		case WRITE:
+			if (!ice_fde_flag || (ice_fde_flag & QCOM_ICE_ENCRYPT))
+				setting->encr_bypass = false;
+			break;
+		case READ:
+			if (!ice_fde_flag || (ice_fde_flag & QCOM_ICE_DECRYPT))
+				setting->decr_bypass = false;
+			break;
+		default:
 			/* Should I say BUG_ON */
 			setting->encr_bypass = true;
 			setting->decr_bypass = true;
-			pr_debug("%s direction unknown", __func__);
+			pr_debug("%s(): direction unknown\n", __func__);
+			break;
 		}
 	}
 
@@ -181,26 +188,6 @@
 }
 EXPORT_SYMBOL(qcom_ice_set_fde_flag);
 
-int qcom_ice_set_fde_conf(sector_t s_sector, sector_t size,
-					int index, int mode)
-{
-	userdata_start = s_sector;
-	userdata_end = s_sector + size;
-	if (INT_MAX - s_sector < size) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	ice_data.key_index = index;
-	ice_data.algo_mode = mode;
-	ice_data.key_size = ICE_CRYPTO_KEY_SIZE_256;
-	ice_data.key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
-
-	pr_debug("%s sector info set start %lu end %lu\n", __func__,
-		userdata_start, userdata_end);
-	return 0;
-}
-EXPORT_SYMBOL(qcom_ice_set_fde_conf);
-
 static int qcom_ice_enable_clocks(struct ice_device *, bool);
 
 #ifdef CONFIG_MSM_BUS_SCALING
@@ -1483,12 +1470,15 @@
 		struct request *req,
 		struct ice_data_setting *setting, bool async)
 {
+	struct ice_crypto_setting *crypto_data;
 	struct ice_crypto_setting pfk_crypto_data = {0};
 	int ret = 0;
 	bool is_pfe = false;
 	sector_t data_size;
+	union map_info *info;
+	unsigned long sec_end = 0;
 
-	if (!pdev || !req || !setting) {
+	if (!pdev || !req) {
 		pr_err("%s: Invalid params passed\n", __func__);
 		return -EINVAL;
 	}
@@ -1507,6 +1497,7 @@
 		/* It is not an error to have a request with no  bio */
 		return 0;
 	}
+    //pr_err("%s bio is %pK\n", __func__, req->bio);
 
 	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
 	if (is_pfe) {
@@ -1521,22 +1512,46 @@
 				&pfk_crypto_data, setting);
 	}
 
-	if (ice_fde_flag == 0)
-		return 0;
-
-	if ((req->__sector >= userdata_start) &&
-			(req->__sector < userdata_end)) {
-	/*
-	 * Ugly hack to address non-block-size aligned userdata end address in
-	 * eMMC based devices.
-	 */
-		data_size = req->__data_len/QCOM_SECT_LEN_IN_BYTE;
-
-		if ((req->__sector + data_size) > userdata_end)
-			return 0;
-		else
+	if (!ice_fde_flag) {
+		if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+			info = dm_get_rq_mapinfo(req);
+			if (!info) {
+				pr_debug("%s info not available in request\n",
+							__func__);
+				return 0;
+			}
+			crypto_data = (struct ice_crypto_setting *)info->ptr;
+			if (!crypto_data) {
+				pr_err("%s crypto_data not available in req\n",
+							__func__);
+				return -EINVAL;
+			}
 			return qti_ice_setting_config(req, pdev,
-				&ice_data, setting);
+						crypto_data, setting);
+		}
+		return 0;
+	}
+
+	if (req->part && req->part->info && req->part->info->volname[0]) {
+		if (!strcmp(req->part->info->volname, "userdata")) {
+			sec_end = req->part->start_sect + req->part->nr_sects -
+					QCOM_UD_FOOTER_SECS;
+			if ((req->__sector >= req->part->start_sect) &&
+				(req->__sector < sec_end)) {
+				/*
+				 * Ugly hack to address non-block-size aligned
+				 * userdata end address in eMMC based devices.
+				 */
+				data_size = req->__data_len /
+						QCOM_SECT_LEN_IN_BYTE;
+
+				if ((req->__sector + data_size) > sec_end)
+					return 0;
+				else
+					return qti_ice_setting_config(req, pdev,
+						&ice_data, setting);
+			}
+		}
 	}
 
 	/*
@@ -1664,7 +1679,7 @@
 
 	list_for_each_entry(ice_dev, &ice_devices, list) {
 		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
-			pr_info("%s: found ice device %p\n", __func__, ice_dev);
+			pr_debug("%s: ice device %pK\n", __func__, ice_dev);
 			return ice_dev;
 		}
 	}
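For reference, QCOM_UD_FOOTER_SIZE above is 0x4000 bytes (16 KiB), so QCOM_UD_FOOTER_SECS works out to 0x4000 / 512 = 32 sectors reserved for the crypto footer. A condensed, standalone sketch of the bounds check introduced above, using hypothetical parameter names rather than the struct request and hd_struct fields in the real code:

#include <linux/types.h>

#define UD_FOOTER_SECS	32	/* 0x4000 bytes / 512 bytes per sector */

/* Illustrative only: a request is eligible for FDE when it starts inside the
 * userdata partition and ends before the footer region at the end of it.
 */
static bool request_in_userdata(sector_t start, sector_t nr_sects,
				sector_t part_start, sector_t part_sects)
{
	sector_t sec_end = part_start + part_sects - UD_FOOTER_SECS;

	if (start < part_start || start >= sec_end)
		return false;

	return (start + nr_sects) <= sec_end;
}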
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 3ac6c6c..16bb660 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -422,6 +422,7 @@
 
 module_platform_driver(sun4i_ss_driver);
 
+MODULE_ALIAS("platform:sun4i-ss");
 MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 42c060c..7c71722 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1116,10 +1116,10 @@
 	return count;
 }
 
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
-		   unsigned int len, struct talitos_edesc *edesc,
-		   struct talitos_ptr *ptr,
-		   int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+			      unsigned int len, struct talitos_edesc *edesc,
+			      struct talitos_ptr *ptr, int sg_count,
+			      unsigned int offset, int tbl_off, int elen)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
@@ -1130,7 +1130,7 @@
 	}
 
 	to_talitos_ptr_len(ptr, len, is_sec1);
-	to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
 
 	if (sg_count == 1) {
 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
@@ -1140,7 +1140,7 @@
 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
 		return sg_count;
 	}
-	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
 					 &edesc->link_tbl[tbl_off]);
 	if (sg_count == 1) {
 		/* Only one segment now, so no link tbl needed*/
@@ -1154,6 +1154,15 @@
 	return sg_count;
 }
 
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+			  unsigned int len, struct talitos_edesc *edesc,
+			  struct talitos_ptr *ptr, int sg_count,
+			  unsigned int offset, int tbl_off)
+{
+	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+				  tbl_off, 0);
+}
+
 /*
  * fill in and submit ipsec_esp descriptor
  */
@@ -1171,7 +1180,7 @@
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	int tbl_off = 0;
 	int sg_count, ret;
-	int sg_link_tbl_len;
+	int elen = 0;
 	bool sync_needed = false;
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
@@ -1225,20 +1234,12 @@
 	 * extent is bytes of HMAC postpended to ciphertext,
 	 * typically 12 for ipsec
 	 */
-	to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
-	to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
+	if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+	    (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+		elen = authsize;
 
-	sg_link_tbl_len = cryptlen;
-
-	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
-		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
-
-		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
-			sg_link_tbl_len += authsize;
-	}
-
-	ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
-			     &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
+	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+				 sg_count, areq->assoclen, tbl_off, elen);
 
 	if (ret > 1) {
 		tbl_off += ret;
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 81f7c16..fe249e7 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -420,6 +420,7 @@
 	.driver = {
 		.name = "arm-memlat-mon",
 		.of_match_table = memlat_match_table,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index 33e16261..d2aaffb 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -1128,6 +1128,7 @@
 	.driver = {
 		.name = "bimc-bwmon",
 		.of_match_table = bimc_bwmon_match_table,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 7053bb4..1936383 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -305,7 +305,8 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f3e211f..7186664 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -152,6 +152,7 @@
 	void __iomem *dma_base;
 	void __iomem *glob_base;
 	struct clk *clk;
+	struct clk *reg_clk;
 	struct tasklet_struct irq_tasklet;
 	struct list_head free_sw_desc;
 	struct dma_device dmadev;
@@ -697,13 +698,26 @@
 	if (ret)
 		return ret;
 
+	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+		if (!IS_ERR(xor_dev->reg_clk)) {
+			ret = clk_prepare_enable(xor_dev->reg_clk);
+			if (ret)
+				return ret;
+		} else {
+			return PTR_ERR(xor_dev->reg_clk);
+		}
+	}
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto disable_reg_clk;
+	}
 	if (!IS_ERR(xor_dev->clk)) {
 		ret = clk_prepare_enable(xor_dev->clk);
 		if (ret)
-			return ret;
+			goto disable_reg_clk;
 	}
 
 	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
@@ -812,8 +826,9 @@
 free_msi_irqs:
 	platform_msi_domain_free_irqs(&pdev->dev);
 disable_clk:
-	if (!IS_ERR(xor_dev->clk))
-		clk_disable_unprepare(xor_dev->clk);
+	clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+	clk_disable_unprepare(xor_dev->reg_clk);
 	return ret;
 }
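The "reg" clock handling above treats -ENOENT from devm_clk_get() as "no such clock, carry on" while propagating every other error, including -EPROBE_DEFER. A compact sketch of that optional-clock idiom (illustrative; newer kernels express the same thing with devm_clk_get_optional()):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative only: enable a clock when the DT provides one, silently skip
 * it when the clock is simply absent, and fail on any real error.
 */
static int enable_optional_clk(struct device *dev, const char *id,
			       struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENOENT) {
		*out = NULL;
		return 0;
	}
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return clk_prepare_enable(clk);
}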
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fb2e747..2c449bd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1570,7 +1570,7 @@
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(struct pl330_dmac *pl330)
 {
-	struct dma_pl330_desc *descdone, *tmp;
+	struct dma_pl330_desc *descdone;
 	unsigned long flags;
 	void __iomem *regs;
 	u32 val;
@@ -1648,7 +1648,9 @@
 	}
 
 	/* Now that we are in no hurry, do the callbacks */
-	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
+	while (!list_empty(&pl330->req_done)) {
+		descdone = list_first_entry(&pl330->req_done,
+					    struct dma_pl330_desc, rqd);
 		list_del(&descdone->rqd);
 		spin_unlock_irqrestore(&pl330->lock, flags);
 		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 03c4eb3..6497f52 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -387,6 +387,7 @@
 	struct device_dma_parameters dma_parms;
 	struct bam_chan *channels;
 	u32 num_channels;
+	u32 num_ees;
 
 	/* execution environment ID, from DT */
 	u32 ee;
@@ -1076,15 +1077,19 @@
 	u32 val;
 
 	/* read revision and configuration information */
-	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
-	val &= NUM_EES_MASK;
+	if (!bdev->num_ees) {
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
+	}
 
 	/* check that configured EE is within range */
-	if (bdev->ee >= val)
+	if (bdev->ee >= bdev->num_ees)
 		return -EINVAL;
 
-	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
-	bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+	if (!bdev->num_channels) {
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
+		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+	}
 
 	if (bdev->controlled_remotely)
 		return 0;
@@ -1179,6 +1184,18 @@
 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
 						"qcom,controlled-remotely");
 
+	if (bdev->controlled_remotely) {
+		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+					   &bdev->num_channels);
+		if (ret)
+			dev_err(bdev->dev, "num-channels unspecified in dt\n");
+
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+					   &bdev->num_ees);
+		if (ret)
+			dev_err(bdev->dev, "num-ees unspecified in dt\n");
+	}
+
 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
 	if (IS_ERR(bdev->bamclk))
 		return PTR_ERR(bdev->bamclk);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 4c357d4..d032032 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -870,7 +870,7 @@
 
 	rcar_dmac_chan_configure_desc(chan, desc);
 
-	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
 
 	/*
 	 * Allocate and fill the transfer chunk descriptors. We own the only
@@ -1246,8 +1246,17 @@
 	 * If the cookie doesn't correspond to the currently running transfer
 	 * then the descriptor hasn't been processed yet, and the residue is
 	 * equal to the full descriptor size.
+	 * Also, a client driver may call this function before
+	 * rcar_dmac_isr_channel_thread() runs. In this case, "desc.running"
+	 * will already point to the next descriptor and the done list will
+	 * be populated. So, if the argument cookie matches a cookie on the
+	 * done list, we can assume the residue is zero.
 	 */
 	if (cookie != desc->async_tx.cookie) {
+		list_for_each_entry(desc, &chan->desc.done, node) {
+			if (cookie == desc->async_tx.cookie)
+				return 0;
+		}
 		list_for_each_entry(desc, &chan->desc.pending, node) {
 			if (cookie == desc->async_tx.cookie)
 				return desc->size;
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2e093c3..2b108fa 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -277,9 +277,8 @@
 	if (info->vbus_gpiod)
 		enable_irq(info->vbus_irq);
 
-	if (!device_may_wakeup(dev))
-		queue_delayed_work(system_power_efficient_wq,
-				   &info->wq_detcable, 0);
+	queue_delayed_work(system_power_efficient_wq,
+			   &info->wq_detcable, 0);
 
 	return ret;
 }
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf8926..d731b41 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1130,7 +1130,13 @@
 		return -ENOMEM;
 
 	offset = (void *)&desc->buffer - (void *)desc;
-	desc->buffer_size = PAGE_SIZE - offset;
+	/*
+	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
+	 * for descriptors, even 0x10-byte ones. This can cause page faults when
+	 * an IOMMU is in use and the oversized read crosses a page boundary.
+	 * Work around this by always leaving at least 0x10 bytes of padding.
+	 */
+	desc->buffer_size = PAGE_SIZE - offset - 0x10;
 	desc->buffer_bus = bus_addr + offset;
 	desc->used = 0;
 
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 88bebe1..42844c3 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -18,7 +18,7 @@
  * of and an antecedent to, SMBIOS, which stands for System
  * Management BIOS.  See further: http://www.dmtf.org/standards
  */
-static const char dmi_empty_string[] = "        ";
+static const char dmi_empty_string[] = "";
 
 static u32 dmi_ver __initdata;
 static u32 dmi_len;
@@ -44,25 +44,21 @@
 static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
 {
 	const u8 *bp = ((u8 *) dm) + dm->length;
+	const u8 *nsp;
 
 	if (s) {
-		s--;
-		while (s > 0 && *bp) {
+		while (--s > 0 && *bp)
 			bp += strlen(bp) + 1;
-			s--;
-		}
 
-		if (*bp != 0) {
-			size_t len = strlen(bp)+1;
-			size_t cmp_len = len > 8 ? 8 : len;
-
-			if (!memcmp(bp, dmi_empty_string, cmp_len))
-				return dmi_empty_string;
+		/* Strings containing only spaces are considered empty */
+		nsp = bp;
+		while (*nsp == ' ')
+			nsp++;
+		if (*nsp != '\0')
 			return bp;
-		}
 	}
 
-	return "";
+	return dmi_empty_string;
 }
 
 static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
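The rewrite above makes dmi_string_nosave() treat strings consisting only of spaces as empty and drops the old 8-space sentinel comparison. A standalone userspace sketch of just that rule (illustrative, not kernel code):

#include <stdio.h>

/* Return the original string if it contains any non-space character,
 * otherwise the canonical empty string, matching the rule applied in
 * dmi_string_nosave() above.
 */
static const char *space_only_as_empty(const char *bp)
{
	const char *nsp = bp;

	while (*nsp == ' ')
		nsp++;

	return (*nsp != '\0') ? bp : "";
}

int main(void)
{
	printf("[%s]\n", space_only_as_empty("    "));  /* prints [] */
	printf("[%s]\n", space_only_as_empty(" OEM ")); /* prints [ OEM ] */
	return 0;
}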
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 03a5925..a9daf71 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -256,7 +256,7 @@
 	if (set)
 		reg |= bit;
 	else
-		reg &= bit;
+		reg &= ~bit;
 	iowrite32(reg, addr);
 
 	spin_unlock_irqrestore(&gpio->lock, flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4f54ff4..56b2419 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -425,7 +425,7 @@
 	struct gpiohandle_request handlereq;
 	struct linehandle_state *lh;
 	struct file *file;
-	int fd, i, ret;
+	int fd, i, count = 0, ret;
 
 	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
 		return -EFAULT;
@@ -471,6 +471,7 @@
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -537,7 +538,7 @@
 out_put_unused_fd:
 	put_unused_fd(fd);
 out_free_descs:
-	for (; i >= 0; i--)
+	for (i = 0; i < count; i++)
 		gpiod_free(lh->descs[i]);
 	kfree(lh->label);
 out_free_lh:
@@ -794,7 +795,7 @@
 	desc = &gdev->descs[offset];
 	ret = gpiod_request(desc, le->label);
 	if (ret)
-		goto out_free_desc;
+		goto out_free_label;
 	le->desc = desc;
 	le->eflags = eflags;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a88d365..564362e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1484,10 +1484,11 @@
 static const u32 vgpr_init_regs[] =
 {
 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
-	mmCOMPUTE_RESOURCE_LIMITS, 0,
+	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
 	mmCOMPUTE_NUM_THREAD_X, 256*4,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1504,10 +1505,11 @@
 static const u32 sgpr1_init_regs[] =
 {
 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
-	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
 	mmCOMPUTE_NUM_THREAD_X, 256*5,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1528,6 +1530,7 @@
 	mmCOMPUTE_NUM_THREAD_X, 256*5,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232..cfd80bc 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -53,7 +53,9 @@
 	}
 
 	drm_mode_connector_update_edid_property(connector, edid);
-	return drm_add_edid_modes(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+	return ret;
 
 fallback:
 	/*
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a75..cdb5358 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -322,19 +322,44 @@
 {
 	uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
 	ssize_t ret;
+	int retry;
 
 	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
 		return 0;
 
-	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
-				     &tmds_oen, sizeof(tmds_oen));
-	if (ret) {
-		DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
-			      enable ? "enable" : "disable");
-		return ret;
+	/*
+	 * LSPCON adapters in low-power state may ignore the first write, so
+	 * read back and verify the written value a few times.
+	 */
+	for (retry = 0; retry < 3; retry++) {
+		uint8_t tmp;
+
+		ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+					     &tmds_oen, sizeof(tmds_oen));
+		if (ret) {
+			DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+				      enable ? "enable" : "disable",
+				      retry + 1);
+			return ret;
+		}
+
+		ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+					    &tmp, sizeof(tmp));
+		if (ret) {
+			DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+				      enable ? "enabling" : "disabling",
+				      retry + 1);
+			return ret;
+		}
+
+		if (tmp == tmds_oen)
+			return 0;
 	}
 
-	return 0;
+	DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+		      enable ? "enabling" : "disabling");
+
+	return -EIO;
 }
 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
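The retry loop above is a write/read-back/verify pattern for adapters that may drop the first write while in a low-power state. A generic sketch of the pattern with hypothetical callbacks (not part of the DRM helper API):

#include <linux/errno.h>

/* Illustrative only: write a one-byte value and verify it stuck, retrying a
 * few times for devices that silently ignore the first write.
 */
static int write_verified(int (*wr)(void *ctx, unsigned char val),
			  int (*rd)(void *ctx, unsigned char *val),
			  void *ctx, unsigned char val, int retries)
{
	unsigned char tmp;
	int ret;

	while (retries-- > 0) {
		ret = wr(ctx, val);
		if (ret)
			return ret;

		ret = rd(ctx, &tmp);
		if (ret)
			return ret;

		if (tmp == val)
			return 0;
	}

	return -EIO;
}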
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 603d842..699db13 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -926,7 +926,7 @@
 	struct drm_device *drm_dev = g2d->subdrv.drm_dev;
 	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
 	struct drm_exynos_pending_g2d_event *e;
-	struct timeval now;
+	struct timespec64 now;
 
 	if (list_empty(&runqueue_node->event_list))
 		return;
@@ -934,9 +934,9 @@
 	e = list_first_entry(&runqueue_node->event_list,
 			     struct drm_exynos_pending_g2d_event, base.link);
 
-	do_gettimeofday(&now);
+	ktime_get_ts64(&now);
 	e->event.tv_sec = now.tv_sec;
-	e->event.tv_usec = now.tv_usec;
+	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
 	e->event.cmdlist_no = cmdlist_no;
 
 	drm_send_event(drm_dev, &e->base);
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 3049613..d7cbe53 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
 #define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26)
 #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26)
 #define EXYNOS_CIIMGEFF_FIN_MASK			(7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
 
 /* Real input DMA size register */
 #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 36a665f..e23748c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3681,7 +3681,11 @@
 					    struct intel_display_error_state *error);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+				    u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val)	\
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ce32303..c185625 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6012,8 +6012,8 @@
 
 	/* Inform power controller of upcoming frequency change */
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      0x80000000);
+	ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 2000);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret) {
@@ -6044,8 +6044,9 @@
 	I915_WRITE(CDCLK_CTL, val);
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      DIV_ROUND_UP(cdclk, 25000));
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      DIV_ROUND_UP(cdclk, 25000), 2000);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d5..3517c0e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -321,7 +321,8 @@
 
 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
 	POSTING_READ(lvds_encoder->reg);
-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 49de476..05427d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7913,8 +7913,8 @@
 	return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			    u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val, int timeout_us)
 {
 	int status;
 
@@ -7935,7 +7935,7 @@
 
 	if (intel_wait_for_register_fw(dev_priv,
 				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-				       500)) {
+				       timeout_us)) {
 		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
 		return -ETIMEDOUT;
 	}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 6be515a..8dbba61 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -189,7 +189,11 @@
 				  struct drm_crtc_state *old_crtc_state)
 {
 	drm_crtc_vblank_on(crtc);
+}
 
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc));
@@ -257,6 +261,7 @@
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.atomic_check = ipu_crtc_atomic_check,
 	.atomic_begin = ipu_crtc_atomic_begin,
+	.atomic_flush = ipu_crtc_atomic_flush,
 	.atomic_disable = ipu_crtc_atomic_disable,
 	.enable = ipu_crtc_enable,
 };
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3e5cdc6..d9839e7 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1182,7 +1182,8 @@
 			dp->hdcp.ops->off(dp->hdcp.data);
 	}
 
-	if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) {
+	if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) &&
+		dp->usbpd->alt_mode_cfg_done) {
 		if (dp->audio_supported)
 			dp->audio->off(dp->audio);
 
@@ -1235,7 +1236,8 @@
 	 * any notification from driver. Initialize post_open callback to notify
 	 * DP connection once framework restarts.
 	 */
-	if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done)
+	if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) &&
+		dp->usbpd->alt_mode_cfg_done)
 		dp_display->post_open = dp_display_post_open;
 
 	dp->power_on = false;
@@ -1486,6 +1488,7 @@
 	.driver = {
 		.name = "msm-dp-display",
 		.of_match_table = dp_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 144ccd9..9a923aa 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -108,7 +108,8 @@
 
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
-void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries,
+			       u32 size);
 void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
 void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
 					     u32 ints);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 32bc3eb..c0652dd 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -243,6 +243,8 @@
 						dsi_ctrl->cell_index);
 	sde_dbg_reg_register_base(dbg_name, dsi_ctrl->hw.base,
 				msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"));
+	sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0,
+				msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"), 0);
 error_remove_dir:
 	debugfs_remove(dir);
 error:
@@ -272,6 +274,8 @@
 	int rc = 0;
 	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
 
+	SDE_EVT32(dsi_ctrl->cell_index, op);
+
 	switch (op) {
 	case DSI_CTRL_OP_POWER_STATE_CHANGE:
 		if (state->power_state == op_state) {
@@ -461,7 +465,7 @@
 	}
 
 	ctrl->hw.base = ptr;
-	pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name,
+	pr_debug("[%s] map dsi_ctrl registers to %pK\n", ctrl->name,
 		 ctrl->hw.base);
 
 	switch (ctrl->version) {
@@ -1338,10 +1342,20 @@
 	u32 current_read_len = 0, total_bytes_read = 0;
 	bool short_resp = false;
 	bool read_done = false;
-	u32 dlen, diff, rlen = msg->rx_len;
+	u32 dlen, diff, rlen;
 	unsigned char *buff;
 	char cmd;
+	struct dsi_cmd_desc *of_cmd;
 
+	if (!msg) {
+		pr_err("Invalid msg\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	of_cmd = container_of(msg, struct dsi_cmd_desc, msg);
+
+	rlen = msg->rx_len;
 	if (msg->rx_len <= 2) {
 		short_resp = true;
 		rd_pkt_size = msg->rx_len;
@@ -1783,16 +1797,20 @@
 	.driver = {
 		.name = "drm_dsi_ctrl",
 		.of_match_table = msm_dsi_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
 #if defined(CONFIG_DEBUG_FS)
 
-void dsi_ctrl_debug_dump(void)
+void dsi_ctrl_debug_dump(u32 *entries, u32 size)
 {
 	struct list_head *pos, *tmp;
 	struct dsi_ctrl *ctrl = NULL;
 
+	if (!entries || !size)
+		return;
+
 	mutex_lock(&dsi_ctrl_list_lock);
 	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
 		struct dsi_ctrl_list_item *n;
@@ -1800,7 +1818,7 @@
 		n = list_entry(pos, struct dsi_ctrl_list_item, list);
 		ctrl = n->ctrl;
 		pr_err("dsi ctrl:%d\n", ctrl->cell_index);
-		ctrl->hw.ops.debug_bus(&ctrl->hw);
+		ctrl->hw.ops.debug_bus(&ctrl->hw, entries, size);
 	}
 	mutex_unlock(&dsi_ctrl_list_lock);
 }
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index b059fc5..6ac7dd7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -60,6 +60,18 @@
 #define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
 
 /**
+ * enum dsi_channel_id - defines dsi channel id.
+ * @DSI_CTRL_LEFT:    DSI 0 channel
+ * @DSI_CTRL_RIGHT:   DSI 1 channel
+ * @DSI_CTRL_MAX:  Maximum value.
+ */
+enum dsi_channel_id {
+	DSI_CTRL_LEFT = 0,
+	DSI_CTRL_RIGHT,
+	DSI_CTRL_MAX,
+};
+
+/**
  * enum dsi_power_state - defines power states for dsi controller.
  * @DSI_CTRL_POWER_VREG_OFF:    Digital and analog supplies for DSI controller
 				turned off
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index d6fab59..73aab3f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -457,8 +457,10 @@
 	/**
 	 * debug_bus() - get dsi debug bus status.
 	 * @ctrl:        Pointer to the controller host hardware.
+	 * @entries:     Array of dsi debug bus control values.
+	 * @size:        Size of dsi debug bus control array.
 	 */
-	void (*debug_bus)(struct dsi_ctrl_hw *ctrl);
+	void (*debug_bus)(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size);
 
 	/**
 	 * soft_reset() - perform a soft reset on DSI controller
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index c1af52f..6dde454 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -459,18 +459,20 @@
 	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
 }
 
-void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl)
+void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size)
 {
-	u32 reg = 0;
+	u32 reg = 0, i = 0;
 
-	DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, 0x181);
-
-	/* make sure that debug test point is enabled */
-	wmb();
-	reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
-
-	pr_err("[DSI_%d] debug bus status:0x%x\n", ctrl->index, reg);
+	for (i = 0; i < size; i++) {
+		DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, entries[i]);
+		/* make sure that debug test point is enabled */
+		wmb();
+		reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
+		pr_err("[DSI_%d] debug bus ctrl: 0x%x status:0x%x\n",
+				ctrl->index, entries[i], reg);
+	}
 }
+
 /**
  * cmd_engine_setup() - setup dsi host controller for command mode
  * @ctrl:          Pointer to the controller host hardware.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 7194f1a..9a75de6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -48,13 +48,16 @@
 static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
 static char dsi_display_secondary[MAX_CMDLINE_PARAM_LEN];
 static struct dsi_display_boot_param boot_displays[MAX_DSI_ACTIVE_DISPLAY];
-static struct device_node *default_active_node;
+static struct device_node *primary_active_node;
+static struct device_node *secondary_active_node;
+
 static const struct of_device_id dsi_display_dt_match[] = {
 	{.compatible = "qcom,dsi-display"},
 	{}
 };
 
-static struct dsi_display *main_display;
+static struct dsi_display *primary_display;
+static struct dsi_display *secondary_display;
 
 static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display)
 {
@@ -568,6 +571,8 @@
 
 	for (i = 0; i < count; ++i) {
 		memset(config->status_buf, 0x0, SZ_4K);
+		if (config->status_cmd.state == DSI_CMD_SET_STATE_LP)
+			cmds[i].msg.flags |= MIPI_DSI_MSG_USE_LPM;
 		if (cmds[i].last_command) {
 			cmds[i].msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
 			flags |= DSI_CTRL_CMD_LAST_COMMAND;
@@ -614,12 +619,20 @@
 
 static int dsi_display_status_reg_read(struct dsi_display *display)
 {
-	int rc = 0, i;
+	int rc = 0, i, cmd_channel_idx = DSI_CTRL_LEFT;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 
 	pr_debug(" ++\n");
 
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	/*
+	 * Check the panel's DSI command channel.
+	 * If cmd_channel is set, we should choose the
+	 * right (DSI1) controller to send the command;
+	 * otherwise we choose the left (DSI0) controller.
+	 */
+	if (display->panel->esd_config.cmd_channel)
+		cmd_channel_idx = DSI_CTRL_RIGHT;
+	m_ctrl = &display->ctrl[cmd_channel_idx];
 
 	if (display->tx_cmd_buf == NULL) {
 		rc = dsi_host_alloc_cmd_tx_buffer(display);
@@ -715,6 +728,7 @@
 		dsi_panel_release_panel_lock(panel);
 		return rc;
 	}
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 	if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio))
 		status_mode = ESD_MODE_PANEL_TE;
@@ -738,6 +752,7 @@
 	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 		DSI_ALL_CLKS, DSI_CLK_OFF);
 	dsi_panel_release_panel_lock(panel);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 
 	return rc;
 }
@@ -1153,7 +1168,7 @@
 		rc = dsi_panel_trigger_esd_attack(display->panel);
 		if (rc) {
 			pr_err("Failed to trigger ESD attack\n");
-			return rc;
+			goto error;
 		}
 	}
 
@@ -2007,11 +2022,9 @@
 			boot_displays[i].name[j] = *(disp_buf + j);
 		boot_displays[i].name[j] = '\0';
 
-		if (i == DSI_PRIMARY) {
+		if (i == DSI_PRIMARY)
 			boot_displays[i].is_primary = true;
-			/* Currently, secondary DSI display is not supported */
-			boot_displays[i].boot_disp_en = true;
-		}
+		boot_displays[i].boot_disp_en = true;
 	}
 	return 0;
 }
@@ -2034,6 +2047,8 @@
 
 	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
 		node = boot_displays[i].node;
+		if (!node)
+			continue;
 		ctrl_count = of_count_phandle_with_args(node, "qcom,dsi-ctrl",
 								NULL);
 
@@ -2075,11 +2090,12 @@
 
 	pr_err("index = %d\n", index);
 
-	if (boot_displays[index].node)
-		return boot_displays[index].node;
-	else if ((index == (MAX_DSI_ACTIVE_DISPLAY - 1))
-			&& (default_active_node))
-		return default_active_node;
+	if ((index == DSI_PRIMARY)
+			&& (primary_active_node))
+		return primary_active_node;
+	else if ((index == DSI_SECONDARY)
+			&& (secondary_active_node))
+		return secondary_active_node;
 	else
 		return NULL;
 }
@@ -3138,9 +3154,6 @@
 				__func__, rc);
 			goto error;
 		}
-
-		/* enable dsi to serve irqs */
-		dsi_display_ctrl_irq_update(display, true);
 	}
 
 	if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_HS_CLK)) {
@@ -3162,6 +3175,11 @@
 			}
 		}
 	}
+
+	/* enable dsi to serve irqs */
+	if (clk & DSI_CORE_CLK)
+		dsi_display_ctrl_irq_update(display, true);
+
 error:
 	return rc;
 }
@@ -3665,6 +3683,7 @@
 	/* For split DSI, update the clock master first */
 
 	pr_debug("configuring seamless dynamic fps\n\n");
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 	rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
@@ -3699,6 +3718,7 @@
 	panel_mode->dsi_mode_flags = 0;
 
 error:
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -4625,6 +4645,7 @@
 	.driver = {
 		.name = "msm-dsi-display",
 		.of_match_table = dsi_display_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
@@ -4632,8 +4653,7 @@
 {
 	int rc = 0;
 	struct dsi_display *display;
-	static bool display_from_cmdline, boot_displays_parsed;
-	static bool comp_add_success;
+	static bool boot_displays_parsed;
 	static struct device_node *primary_np, *secondary_np;
 
 	if (!pdev || !pdev->dev.of_node) {
@@ -4662,55 +4682,53 @@
 	display->cmdline_topology = NO_OVERRIDE;
 	display->cmdline_timing = 0;
 
-	if ((!display_from_cmdline) &&
-			(boot_displays[DSI_PRIMARY].boot_disp_en)) {
-		display->is_active = dsi_display_name_compare(pdev->dev.of_node,
-						display->name, DSI_PRIMARY);
-		if (display->is_active) {
-			if (comp_add_success) {
-				(void)_dsi_display_dev_deinit(main_display);
-				component_del(&main_display->pdev->dev,
-					      &dsi_display_comp_ops);
-				mutex_lock(&dsi_display_list_lock);
-				list_del(&main_display->list);
-				mutex_unlock(&dsi_display_list_lock);
-				comp_add_success = false;
-				default_active_node = NULL;
-				pr_debug("removed the existing comp ops\n");
-			}
-			/*
-			 * Need to add component for
-			 * the secondary DSI display
-			 * when more than one DSI display
-			 * is supported.
-			 */
-			pr_debug("cmdline primary dsi: %s\n",
-						display->name);
-			display_from_cmdline = true;
-			dsi_display_parse_cmdline_topology(display,
-					DSI_PRIMARY);
-			primary_np = pdev->dev.of_node;
+	if (boot_displays[DSI_PRIMARY].boot_disp_en && !primary_np &&
+		dsi_display_name_compare(pdev->dev.of_node,
+			display->name, DSI_PRIMARY)) {
+		if (primary_display) {
+			(void)_dsi_display_dev_deinit(primary_display);
+			component_del(&primary_display->pdev->dev,
+					&dsi_display_comp_ops);
+			mutex_lock(&dsi_display_list_lock);
+			list_del(&primary_display->list);
+			mutex_unlock(&dsi_display_list_lock);
+			primary_active_node = NULL;
+			pr_debug("removed the existing comp ops\n");
 		}
+		/*
+		 * Need to add component for
+		 * the secondary DSI display
+		 * when more than one DSI display
+		 * is supported.
+		 */
+		pr_debug("cmdline primary dsi: %s\n", display->name);
+		display->is_active = true;
+		dsi_display_parse_cmdline_topology(display, DSI_PRIMARY);
+		primary_np = pdev->dev.of_node;
 	}
 
-	if (boot_displays[DSI_SECONDARY].boot_disp_en) {
-		if (!secondary_np) {
-			if (dsi_display_name_compare(pdev->dev.of_node,
-				display->name, DSI_SECONDARY)) {
-				pr_debug("cmdline secondary dsi: %s\n",
-							display->name);
-				secondary_np = pdev->dev.of_node;
-				if (primary_np) {
-					if (validate_dsi_display_selection()) {
-					display->is_active = true;
-					dsi_display_parse_cmdline_topology
-						(display, DSI_SECONDARY);
-					} else {
-						boot_displays[DSI_SECONDARY]
-							.boot_disp_en = false;
-					}
-				}
+	if (boot_displays[DSI_SECONDARY].boot_disp_en && !secondary_np &&
+		dsi_display_name_compare(pdev->dev.of_node,
+			display->name, DSI_SECONDARY)) {
+		pr_debug("cmdline secondary dsi: %s\n", display->name);
+		if (validate_dsi_display_selection()) {
+			if (secondary_display) {
+				(void)_dsi_display_dev_deinit(
+						secondary_display);
+				component_del(&secondary_display->pdev->dev,
+						&dsi_display_comp_ops);
+				mutex_lock(&dsi_display_list_lock);
+				list_del(&secondary_display->list);
+				mutex_unlock(&dsi_display_list_lock);
+				secondary_active_node = NULL;
+				pr_debug("removed the existing comp ops\n");
 			}
+			display->is_active = true;
+			dsi_display_parse_cmdline_topology(display,
+					DSI_SECONDARY);
+			secondary_np = pdev->dev.of_node;
+		} else {
+			boot_displays[DSI_SECONDARY].boot_disp_en = false;
 		}
 	}
 	display->display_type = of_get_property(pdev->dev.of_node,
@@ -4725,12 +4743,18 @@
 	list_add(&display->list, &dsi_display_list);
 	mutex_unlock(&dsi_display_list_lock);
 
-	if (!display_from_cmdline)
+	if (!strcmp(display->display_type, "primary") && !primary_np)
+		display->is_active = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-display-active");
+	else if (strcmp(display->display_type, "primary") && !secondary_np)
 		display->is_active = of_property_read_bool(pdev->dev.of_node,
 						"qcom,dsi-display-active");
 
 	if (display->is_active) {
-		main_display = display;
+		if (!strcmp(display->display_type, "primary"))
+			primary_display = display;
+		else
+			secondary_display = display;
 		rc = _dsi_display_dev_init(display);
 		if (rc) {
 			pr_err("device init failed, rc=%d\n", rc);
@@ -4741,10 +4765,11 @@
 		if (rc)
 			pr_err("component add failed, rc=%d\n", rc);
 
-		comp_add_success = true;
 		pr_debug("Component_add success: %s\n", display->name);
-		if (!display_from_cmdline)
-			default_active_node = pdev->dev.of_node;
+		if (!strcmp(display->display_type, "primary"))
+			primary_active_node = pdev->dev.of_node;
+		else
+			secondary_active_node = pdev->dev.of_node;
 	}
 	return rc;
 }
@@ -4983,7 +5008,10 @@
 		info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
 
 	info->is_connected = true;
-	info->is_primary = true;
+	info->is_primary = false;
+	if (!strcmp(display->display_type, "primary"))
+		info->is_primary = true;
+
 	info->width_mm = phy_props.panel_width_mm;
 	info->height_mm = phy_props.panel_height_mm;
 	info->max_width = 1920;
@@ -5820,6 +5848,7 @@
 		return -EINVAL;
 	}
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	mutex_lock(&display->display_lock);
 
 	mode = display->panel->cur_mode;
@@ -5954,6 +5983,7 @@
 	(void)dsi_panel_post_unprepare(display->panel);
 error:
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -6177,6 +6207,7 @@
 		pr_err("no valid mode set for the display");
 		return -EINVAL;
 	}
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 	/* Engine states and panel states are populated during splash
 	 * resource init and hence we return early
@@ -6262,6 +6293,7 @@
 	(void)dsi_panel_disable(display->panel);
 error:
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -6324,6 +6356,7 @@
 		return -EINVAL;
 	}
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	mutex_lock(&display->display_lock);
 
 	rc = dsi_display_wake_up(display);
@@ -6352,6 +6385,7 @@
 		       display->name, rc);
 
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
@@ -6381,6 +6415,7 @@
 		return -EINVAL;
 	}
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	mutex_lock(&display->display_lock);
 
 	rc = dsi_display_wake_up(display);
@@ -6434,6 +6469,7 @@
 		       display->name, rc);
 
 	mutex_unlock(&display->display_lock);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index cb9c1fa..dab85f4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2776,6 +2776,9 @@
 				esd_config->groups * status_len);
 	}
 
+	esd_config->cmd_channel = of_property_read_bool(of_node,
+		"qcom,mdss-dsi-panel-cmds-only-by-right");
+
 	return 0;
 
 error4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f8b65ab..c0ecb7f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -129,6 +129,7 @@
 
 struct drm_panel_esd_config {
 	bool esd_enabled;
+	bool cmd_channel;
 
 	enum esd_check_status_mode status_mode;
 	struct dsi_panel_cmd_set status_cmd;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 2e2d0d8..3d6711f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -107,7 +107,8 @@
 
 	phy->hw.base = ptr;
 
-	pr_debug("[%s] map dsi_phy registers to %p\n", phy->name, phy->hw.base);
+	pr_debug("[%s] map dsi_phy registers to %pK\n",
+		phy->name, phy->hw.base);
 
 	return rc;
 }
@@ -564,6 +565,8 @@
 	snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_phy", dsi_phy->index);
 	sde_dbg_reg_register_base(dbg_name, dsi_phy->hw.base,
 				msm_iomap_size(dsi_phy->pdev, "dsi_phy"));
+	sde_dbg_reg_register_dump_range(dbg_name, dbg_name, 0,
+				msm_iomap_size(dsi_phy->pdev, "dsi_phy"), 0);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ec572f8..e457322 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -169,6 +169,7 @@
 	.driver = {
 		.name = "msm_dsi",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84..3a0f180 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@
 		ret = -ENOMEM;
 		goto fail;
 	}
-	DBG("eDP probed=%p", edp);
+	DBG("eDP probed=%pK", edp);
 
 	edp->pdev = pdev;
 	platform_set_drvdata(pdev, edp);
@@ -128,6 +128,7 @@
 	.driver = {
 		.name = "msm_edp",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 6279084..b8f5469 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -646,6 +646,7 @@
 	.driver = {
 		.name = "hdmi_msm",
 		.of_match_table = msm_hdmi_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f05d760..46b60b1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -827,6 +827,7 @@
 	.driver = {
 		.name = "msm_mdp",
 		.of_match_table = mdp5_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 95bdc36..e445098 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -25,6 +25,8 @@
 #include "msm_fence.h"
 #include "sde_trace.h"
 
+#define MULTIPLE_CONN_DETECTED(x) ((x) > 1)
+
 struct msm_commit {
 	struct drm_device *dev;
 	struct drm_atomic_state *state;
@@ -111,6 +113,66 @@
 		kfree(c);
 }
 
+static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+			struct drm_crtc_state *crtc_state, bool enable)
+{
+	struct drm_connector *connector = NULL;
+	struct drm_connector_state  *conn_state = NULL;
+	int i = 0;
+	int conn_cnt = 0;
+
+	if (msm_is_mode_seamless(&crtc_state->mode) ||
+		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
+		return true;
+
+	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
+		return true;
+
+	if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
+		for_each_connector_in_state(state, connector, conn_state, i) {
+			if ((conn_state->crtc == crtc_state->crtc) ||
+					(connector->state->crtc ==
+					 crtc_state->crtc))
+				conn_cnt++;
+
+			if (MULTIPLE_CONN_DETECTED(conn_cnt))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
+		struct drm_connector_state *old_conn_state, bool enable)
+{
+	if (!old_conn_state || !old_conn_state->crtc)
+		return false;
+
+	if (!old_conn_state->crtc->state->mode_changed &&
+			!old_conn_state->crtc->state->active_changed &&
+			old_conn_state->crtc->state->connectors_changed) {
+		if (old_conn_state->crtc == connector->state->crtc)
+			return true;
+	}
+
+	if (enable)
+		return false;
+
+	if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
+		return true;
+
+	if (msm_is_mode_seamless_vrr(
+			&connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
+	if (msm_is_mode_seamless_dms(
+			&connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
+	return false;
+}
+
 static void msm_atomic_wait_for_commit_done(
 		struct drm_device *dev,
 		struct drm_atomic_state *old_state)
@@ -174,14 +236,7 @@
 		if (WARN_ON(!encoder))
 			continue;
 
-		if (msm_is_mode_seamless(
-			&connector->encoder->crtc->state->mode) ||
-			msm_is_mode_seamless_vrr(
-			&connector->encoder->crtc->state->adjusted_mode))
-			continue;
-
-		if (msm_is_mode_seamless_dms(
-			&connector->encoder->crtc->state->adjusted_mode))
+		if (_msm_seamless_for_conn(connector, old_conn_state, false))
 			continue;
 
 		funcs = encoder->helper_private;
@@ -223,11 +278,7 @@
 		if (!old_crtc_state->active)
 			continue;
 
-		if (msm_is_mode_seamless(&crtc->state->mode) ||
-			msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
-			continue;
-
-		if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -286,8 +337,14 @@
 		mode = &new_crtc_state->mode;
 		adjusted_mode = &new_crtc_state->adjusted_mode;
 
-		if (!new_crtc_state->mode_changed)
+		if (!new_crtc_state->mode_changed &&
+				new_crtc_state->connectors_changed) {
+			if (_msm_seamless_for_conn(connector,
+					old_conn_state, false))
+				continue;
+		} else if (!new_crtc_state->mode_changed) {
 			continue;
+		}
 
 		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
@@ -365,8 +422,7 @@
 		if (!crtc->state->active)
 			continue;
 
-		if (msm_is_mode_seamless(&crtc->state->mode) ||
-			msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
+		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -397,18 +453,24 @@
 				    connector->state->crtc->state))
 			continue;
 
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
 		encoder = connector->state->best_encoder;
 		funcs = encoder->helper_private;
 
 		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
 
-		blank = MSM_DRM_BLANK_UNBLANK;
-		notifier_data.data = &blank;
-		notifier_data.id =
-			connector->state->crtc->index;
-		msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+		if (connector->state->crtc->state->active_changed) {
+			blank = MSM_DRM_BLANK_UNBLANK;
+			notifier_data.data = &blank;
+			notifier_data.id =
+				connector->state->crtc->index;
+			DRM_DEBUG_ATOMIC("Notify early unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
 					    &notifier_data);
+		}
 		/*
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call enable hooks twice.
@@ -444,14 +506,20 @@
 				    connector->state->crtc->state))
 			continue;
 
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
 		encoder = connector->state->best_encoder;
 
 		DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
 
 		drm_bridge_enable(encoder->bridge);
-		msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+		if (connector->state->crtc->state->active_changed) {
+			DRM_DEBUG_ATOMIC("Notify unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
 					    &notifier_data);
+		}
 	}
 	SDE_ATRACE_END("msm_enable");
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0697db8..0f565d3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -156,7 +156,8 @@
 	}
 
 	if (reglog)
-		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+		dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
+			dbgname, ptr, size);
 
 	return ptr;
 }
@@ -187,7 +188,7 @@
 void msm_writel(u32 data, void __iomem *addr)
 {
 	if (reglog)
-		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+		pr_debug("IO:W %pK %08x\n", addr, data);
 	writel(data, addr);
 }
 
@@ -196,7 +197,7 @@
 	u32 val = readl(addr);
 
 	if (reglog)
-		printk(KERN_ERR "IO:R %p %08x\n", addr, val);
+		pr_err("IO:R %pK %08x\n", addr, val);
 	return val;
 }
 
@@ -1024,7 +1025,7 @@
 
 	if (!kms)
 		return -ENXIO;
-	DBG("dev=%p, crtc=%u", dev, pipe);
+	DBG("dev=%pK, crtc=%u", dev, pipe);
 	return vblank_ctrl_queue_work(priv, pipe, true);
 }
 
@@ -1035,7 +1036,7 @@
 
 	if (!kms)
 		return;
-	DBG("dev=%p, crtc=%u", dev, pipe);
+	DBG("dev=%pK, crtc=%u", dev, pipe);
 	vblank_ctrl_queue_work(priv, pipe, false);
 }
 
@@ -1993,6 +1994,7 @@
 		.name   = "msm_drm",
 		.of_match_table = dt_match,
 		.pm     = &msm_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e8bf244..a1c9d82 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -68,7 +68,7 @@
 	msm_fb = to_msm_framebuffer(fb);
 	n = drm_format_num_planes(fb->pixel_format);
 
-	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+	DBG("destroy: FB ID: %d (%pK)", fb->base.id, fb);
 
 	drm_framebuffer_cleanup(fb);
 
@@ -336,7 +336,7 @@
 	unsigned int hsub, vsub;
 	bool is_modified = false;
 
-	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+	DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
@@ -420,7 +420,7 @@
 		goto fail;
 	}
 
-	DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+	DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
 
 	return fb;
 
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a33..5b886d0 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -142,7 +142,7 @@
 		goto fail_unlock;
 	}
 
-	DBG("fbi=%p, dev=%p", fbi, dev);
+	DBG("fbi=%pK, dev=%pK", fbi, dev);
 
 	fbdev->fb = fb;
 	helper->fb = fb;
@@ -167,7 +167,7 @@
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = fbdev->bo->size;
 
-	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+	DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
 	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 277b421..ddd4607 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -253,7 +253,7 @@
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %pK pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
 
 	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
@@ -428,7 +428,7 @@
 
 	if (!ret && domain) {
 		*iova = domain->iova;
-		if (aspace && aspace->domain_attached)
+		if (aspace && !msm_obj->in_active_list)
 			msm_gem_add_obj_to_aspace_active_list(aspace, obj);
 	} else {
 		obj_remove_domain(domain);
@@ -799,7 +799,7 @@
 		break;
 	}
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK %zu%s\n",
 
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, obj->refcount.refcount.counter,
@@ -968,6 +968,7 @@
 	INIT_LIST_HEAD(&msm_obj->domains);
 	INIT_LIST_HEAD(&msm_obj->iova_list);
 	msm_obj->aspace = NULL;
+	msm_obj->in_active_list = false;
 
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8521bea..ba01ffb 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -124,6 +124,7 @@
 	struct list_head iova_list;
 
 	struct msm_gem_address_space *aspace;
+	bool in_active_list;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index d02228a..e5b1cc37 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -71,6 +71,7 @@
 {
 	WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
 	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+	msm_obj->in_active_list = true;
 }
 
 static void smmu_aspace_remove_from_active(
@@ -84,6 +85,7 @@
 	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
 			iova_list) {
 		if (msm_obj == obj) {
+			msm_obj->in_active_list = false;
 			list_del(&msm_obj->iova_list);
 			break;
 		}
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 85867b2..ccd5e20 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -636,6 +636,7 @@
 	.driver = {
 		.name = "msmdrm_smmu",
 		.of_match_table = msm_smmu_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 07d1ad7..ab1d86b 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1769,6 +1769,14 @@
 	if (!conn)
 		return;
 
+	/* Panel dead notification can come from:
+	 * 1) the ESD thread
+	 * 2) the commit thread (if TE stops coming)
+	 * In such a case, avoid sending the failure notification twice.
+	 */
+	if (conn->panel_dead)
+		return;
+
 	conn->panel_dead = true;
 	event.type = DRM_EVENT_PANEL_DEAD;
 	event.length = sizeof(bool);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index cefa513..34a3d5f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -79,6 +79,12 @@
 
 #define MISR_BUFF_SIZE			256
 
+/*
+ * Time period for fps calculation, in microseconds.
+ * The default value is 1 second.
+ */
+#define CRTC_TIME_PERIOD_CALC_FPS_US	1000000
+
 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -125,6 +131,37 @@
 									enable);
 }
 
+/*
+ * sde_crtc_calc_fps() - Calculates fps value.
+ * @sde_crtc   : CRTC structure
+ *
+ * This function is called on frame done. It counts the number of frames
+ * completed every second and stores the result in measured_fps.
+ * measured_fps is 10 times the calculated fps value; for example,
+ * measured_fps = 594 for a calculated fps of 59.4.
+ */
+static void sde_crtc_calc_fps(struct sde_crtc *sde_crtc)
+{
+	ktime_t current_time_us;
+	u64 fps, diff_us;
+
+	current_time_us = ktime_get();
+	diff_us = (u64)ktime_us_delta(current_time_us,
+			sde_crtc->fps_info.last_sampled_time_us);
+	sde_crtc->fps_info.frame_count++;
+
+	if (diff_us >= CRTC_TIME_PERIOD_CALC_FPS_US) {
+		fps = ((u64)sde_crtc->fps_info.frame_count) * 10000000;
+		do_div(fps, diff_us);
+		sde_crtc->fps_info.measured_fps = (unsigned int)fps;
+		SDE_DEBUG(" FPS for crtc%d is %d.%d\n",
+				sde_crtc->base.base.id, (unsigned int)fps/10,
+				(unsigned int)fps%10);
+		sde_crtc->fps_info.last_sampled_time_us = current_time_us;
+		sde_crtc->fps_info.frame_count = 0;
+	}
+}
+
 /**
  * _sde_crtc_rp_to_crtc - get crtc from resource pool object
  * @rp: Pointer to resource pool
@@ -601,6 +638,50 @@
 		return;
 }
 
+static int _sde_debugfs_fps_status_show(struct seq_file *s, void *data)
+{
+	struct sde_crtc *sde_crtc;
+	unsigned int fps_int, fps_float;
+	ktime_t current_time_us;
+	u64 fps, diff_us;
+
+	if (!s || !s->private) {
+		SDE_ERROR("invalid input param(s)\n");
+		return -EAGAIN;
+	}
+
+	sde_crtc = s->private;
+
+	current_time_us = ktime_get();
+	diff_us = (u64)ktime_us_delta(current_time_us,
+			sde_crtc->fps_info.last_sampled_time_us);
+
+	if (diff_us >= CRTC_TIME_PERIOD_CALC_FPS_US) {
+		fps = ((u64)sde_crtc->fps_info.frame_count) * 10000000;
+		do_div(fps, diff_us);
+		sde_crtc->fps_info.measured_fps = (unsigned int)fps;
+		sde_crtc->fps_info.last_sampled_time_us = current_time_us;
+		sde_crtc->fps_info.frame_count = 0;
+		SDE_DEBUG("Measured FPS for crtc%d is %d.%d\n",
+				sde_crtc->base.base.id, (unsigned int)fps/10,
+				(unsigned int)fps%10);
+	}
+
+	fps_int = sde_crtc->fps_info.measured_fps / 10;
+	fps_float = sde_crtc->fps_info.measured_fps % 10;
+
+	seq_printf(s, "fps: %d.%d\n", fps_int, fps_float);
+
+	return 0;
+}
+
+static int _sde_debugfs_fps_status(struct inode *inode, struct file *file)
+{
+	return single_open(file, _sde_debugfs_fps_status_show,
+			inode->i_private);
+}
+
 static ssize_t vsync_event_show(struct device *device,
 	struct device_attribute *attr, char *buf)
 {
@@ -2299,7 +2380,6 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_kms *sde_kms;
 	unsigned long flags;
-	bool frame_done = false;
 	bool in_clone_mode = false;
 
 	if (!work) {
@@ -2354,10 +2434,6 @@
 			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE3);
 		}
-
-		if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
-					| SDE_ENCODER_FRAME_EVENT_ERROR))
-			frame_done = true;
 	}
 
 	if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) {
@@ -2378,9 +2454,6 @@
 		SDE_ERROR("crtc%d ts:%lld received panel dead event\n",
 				crtc->base.id, ktime_to_ns(fevent->ts));
 
-	if (frame_done)
-		complete_all(&sde_crtc->frame_done_comp);
-
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
 	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
@@ -3260,10 +3333,10 @@
 			&cstate->property_state);
 }
 
-static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+static int _sde_crtc_flush_event_thread(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
-	int ret, rc = 0, i;
+	int i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -3287,17 +3360,9 @@
 			kthread_flush_work(&sde_crtc->frame_events[i].work);
 	}
 
-	ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
-			msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
-	if (!ret) {
-		SDE_ERROR("frame done completion wait timed out, ret:%d\n",
-				ret);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL);
-		rc = -ETIMEDOUT;
-	}
 	SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
 
-	return rc;
+	return 0;
 }
 
 static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
@@ -3624,7 +3689,6 @@
 	struct sde_kms *sde_kms;
 	struct sde_crtc_state *cstate;
 	bool is_error, reset_req;
-	int ret;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -3683,20 +3747,10 @@
 	}
 	sde_crtc->reset_request = reset_req;
 
-	/* wait for frame_event_done completion */
-	SDE_ATRACE_BEGIN("wait_for_frame_done_event");
-	ret = _sde_crtc_wait_for_frame_done(crtc);
-	SDE_ATRACE_END("wait_for_frame_done_event");
-	if (ret) {
-		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-				crtc->base.id,
-				atomic_read(&sde_crtc->frame_pending));
-
-		is_error = true;
-
-		/* force offline rotation mode since the commit has no pipes */
-		cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
-	}
+	SDE_ATRACE_BEGIN("flush_event_thread");
+	_sde_crtc_flush_event_thread(crtc);
+	SDE_ATRACE_END("flush_event_thread");
+	sde_crtc_calc_fps(sde_crtc);
 
 	if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
 		/* acquire bandwidth and other resources */
@@ -3735,7 +3789,6 @@
 		sde_encoder_kickoff(encoder, false);
 	}
 
-	reinit_completion(&sde_crtc->frame_done_comp);
 	SDE_ATRACE_END("crtc_commit");
 	return;
 }
@@ -4136,11 +4189,7 @@
 	if (cstate->num_ds_enabled)
 		sde_crtc->ds_reconfig = true;
 
-	/* wait for frame_event_done completion */
-	if (_sde_crtc_wait_for_frame_done(crtc))
-		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-				crtc->base.id,
-				atomic_read(&sde_crtc->frame_pending));
+	_sde_crtc_flush_event_thread(crtc);
 
 	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
 			sde_crtc->vblank_requested,
@@ -4155,6 +4204,8 @@
 	sde_crtc->enabled = false;
 
 	if (atomic_read(&sde_crtc->frame_pending)) {
+		SDE_ERROR("crtc%d frame_pending%d\n", crtc->base.id,
+				atomic_read(&sde_crtc->frame_pending));
 		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
 							SDE_EVTLOG_FUNC_CASE2);
 		sde_core_perf_crtc_release_bw(crtc);
@@ -4648,8 +4699,6 @@
 
 	for (i = 1; i < SSPP_MAX; i++) {
 		if (pipe_staged[i]) {
-			sde_plane_clear_multirect(pipe_staged[i]);
-
 			if (is_sde_plane_virtual(pipe_staged[i]->plane)) {
 				SDE_ERROR(
 					"r1 only virt plane:%d not supported\n",
@@ -4657,6 +4706,7 @@
 				rc  = -EINVAL;
 				goto end;
 			}
+			sde_plane_clear_multirect(pipe_staged[i]);
 		}
 	}
 
@@ -4848,7 +4898,7 @@
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 
-	SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file);
+	SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file);
 	_sde_crtc_complete_flip(crtc, file);
 }
 
@@ -5692,6 +5742,73 @@
 }
 DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
 
+static int _sde_debugfs_fence_status_show(struct seq_file *s, void *data)
+{
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_connector *conn;
+	struct drm_mode_object *drm_obj;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_fence_context *ctx;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_crtc = s->private;
+	crtc = &sde_crtc->base;
+	cstate = to_sde_crtc_state(crtc->state);
+
+	/* Dump input fence info */
+	seq_puts(s, "===Input fence===\n");
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct sde_plane_state *pstate;
+		struct fence *fence;
+
+		pstate = to_sde_plane_state(plane->state);
+		if (!pstate)
+			continue;
+
+		seq_printf(s, "plane:%u stage:%d\n", plane->base.id,
+			pstate->stage);
+
+		fence = pstate->input_fence;
+		if (fence)
+			sde_fence_list_dump(fence, &s);
+	}
+
+	/* Dump release fence info */
+	seq_puts(s, "\n");
+	seq_puts(s, "===Release fence===\n");
+	ctx = &sde_crtc->output_fence;
+	drm_obj = &crtc->base;
+	sde_debugfs_timeline_dump(ctx, drm_obj, &s);
+	seq_puts(s, "\n");
+
+	/* Dump retire fence info */
+	seq_puts(s, "===Retire fence===\n");
+	drm_for_each_connector(conn, crtc->dev)
+		if (conn->state && conn->state->crtc == crtc &&
+				cstate->num_connectors < MAX_CONNECTORS) {
+			struct sde_connector *c_conn;
+
+			c_conn = to_sde_connector(conn);
+			ctx = &c_conn->retire_fence;
+			drm_obj = &conn->base;
+			sde_debugfs_timeline_dump(ctx, drm_obj, &s);
+		}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int _sde_debugfs_fence_status(struct inode *inode, struct file *file)
+{
+	return single_open(file, _sde_debugfs_fence_status_show,
+				inode->i_private);
+}
+
 static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
@@ -5708,6 +5825,14 @@
 		.read =		_sde_crtc_misr_read,
 		.write =	_sde_crtc_misr_setup,
 	};
+	static const struct file_operations debugfs_fence_fops = {
+		.open =		_sde_debugfs_fence_status,
+		.read =		seq_read,
+	};
+	static const struct file_operations debugfs_fps_fops = {
+		.open =		_sde_debugfs_fps_status,
+		.read =		seq_read,
+	};
 
 	if (!crtc)
 		return -EINVAL;
@@ -5732,6 +5857,10 @@
 			&sde_crtc_debugfs_state_fops);
 	debugfs_create_file("misr_data", 0600, sde_crtc->debugfs_root,
 					sde_crtc, &debugfs_misr_fops);
+	debugfs_create_file("fence_status", 0400, sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_fence_fops);
+	debugfs_create_file("fps", 0400, sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_fps_fops);
 
 	return 0;
 }
@@ -5935,7 +6064,6 @@
 	mutex_init(&sde_crtc->rp_lock);
 	INIT_LIST_HEAD(&sde_crtc->rp_head);
 
-	init_completion(&sde_crtc->frame_done_comp);
 	sde_crtc->enabled = false;
 
 	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
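The fps debugfs node added above reports a fixed-point value: measured_fps is ten times the frame rate, resampled once at least CRTC_TIME_PERIOD_CALC_FPS_US microseconds have elapsed. A minimal user-space sketch of the same arithmetic (illustrative only; the helper name and sample inputs below are hypothetical, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* measured_fps = frames * 10^7 / elapsed_us  ==  (frames per second) * 10 */
static unsigned int calc_measured_fps(uint64_t frame_count, uint64_t diff_us)
{
	return (unsigned int)((frame_count * 10000000ULL) / diff_us);
}

int main(void)
{
	/* 60 frames over 1,010,000 us -> 59.4 fps -> measured_fps == 594 */
	unsigned int fps = calc_measured_fps(60, 1010000);

	printf("fps: %u.%u\n", fps / 10, fps % 10);
	return 0;
}

With the patch applied, reading the new per-CRTC fps debugfs file is expected to print a line of the same shape, e.g. "fps: 59.4"; the exact path depends on where sde_crtc->debugfs_root is rooted.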
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 53b8df9..99177b1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -136,6 +136,12 @@
 	void *usr;
 };
 
+struct sde_crtc_fps_info {
+	u32 frame_count;
+	ktime_t last_sampled_time_us;
+	u32 measured_fps;
+};
+
 /*
  * Maximum number of free event structures to cache
  */
@@ -184,7 +190,6 @@
  * @frame_events  : static allocation of in-flight frame events
  * @frame_event_list : available frame event list
  * @spin_lock     : spin lock for frame event, transaction status, etc...
- * @frame_done_comp    : for frame_event_done synchronization
  * @event_thread  : Pointer to event handler thread
  * @event_worker  : Event worker queue
  * @event_cache   : Local cache of event worker structures
@@ -232,6 +237,7 @@
 	u64 play_count;
 	ktime_t vblank_cb_time;
 	ktime_t vblank_last_cb_time;
+	struct sde_crtc_fps_info fps_info;
 	struct device *sysfs_dev;
 	struct kernfs_node *vsync_event_sf;
 	bool vblank_requested;
@@ -255,7 +261,6 @@
 	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
 	struct list_head frame_event_list;
 	spinlock_t spin_lock;
-	struct completion frame_done_comp;
 
 	/* for handling internal event thread */
 	struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 2ce8b2e..73864b6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -738,8 +738,8 @@
 	mutex_destroy(&sde_enc->enc_lock);
 
 	if (sde_enc->input_handler) {
-		input_unregister_handler(sde_enc->input_handler);
 		kfree(sde_enc->input_handler);
+		sde_enc->input_handler = NULL;
 	}
 
 	kfree(sde_enc);
@@ -1489,12 +1489,13 @@
 
 		vsync_cfg.pp_count = sde_enc->num_phys_encs;
 		vsync_cfg.frame_rate = mode_info.frame_rate;
+		vsync_cfg.vsync_source =
+			sde_enc->cur_master->hw_pp->caps->te_source;
 		if (is_dummy)
 			vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_1;
 		else if (disp_info->is_te_using_watchdog_timer)
 			vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0;
-		else
-			vsync_cfg.vsync_source = SDE_VSYNC0_SOURCE_GPIO;
+
 		vsync_cfg.is_dummy = is_dummy;
 
 		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
@@ -1887,7 +1888,7 @@
 {
 	struct drm_encoder *drm_enc = NULL;
 	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_drm_thread *disp_thread = NULL;
+	struct msm_drm_thread *event_thread = NULL;
 	struct msm_drm_private *priv = NULL;
 
 	if (!handle || !handle->handler || !handle->handler->private) {
@@ -1904,7 +1905,7 @@
 	priv = drm_enc->dev->dev_private;
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	if (!sde_enc->crtc || (sde_enc->crtc->index
-			>= ARRAY_SIZE(priv->disp_thread))) {
+			>= ARRAY_SIZE(priv->event_thread))) {
 		SDE_DEBUG_ENC(sde_enc,
 			"invalid cached CRTC: %d or crtc index: %d\n",
 			sde_enc->crtc == NULL,
@@ -1914,9 +1915,10 @@
 
 	SDE_EVT32_VERBOSE(DRMID(drm_enc));
 
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
+	event_thread = &priv->event_thread[sde_enc->crtc->index];
 
-	kthread_queue_work(&disp_thread->worker,
+	/* Queue input event work to event thread */
+	kthread_queue_work(&event_thread->worker,
 				&sde_enc->input_event_work);
 }
 
@@ -2510,6 +2512,109 @@
 	}
 }
 
+static int _sde_encoder_input_connect(struct input_handler *handler,
+	struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int rc = 0;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("failed to register input handle\n");
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("failed to open input device\n");
+		goto error_unregister;
+	}
+
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+
+error:
+	kfree(handle);
+
+	return rc;
+}
+
+static void _sde_encoder_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/**
+ * Structure for specifying event parameters on which to receive callbacks.
+ * This structure will trigger a callback in case of a touch event (specified
+ * by EV_ABS) where there is a change in the X or Y coordinates.
+ */
+static const struct input_device_id sde_input_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+					BIT_MASK(ABS_MT_POSITION_X) |
+					BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{ },
+};
+
+static int _sde_encoder_input_handler_register(
+		struct input_handler *input_handler)
+{
+	int rc = 0;
+
+	rc = input_register_handler(input_handler);
+	if (rc) {
+		pr_err("input_register_handler failed, rc= %d\n", rc);
+		kfree(input_handler);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int _sde_encoder_input_handler(
+		struct sde_encoder_virt *sde_enc)
+{
+	struct input_handler *input_handler = NULL;
+	int rc = 0;
+
+	if (sde_enc->input_handler) {
+		SDE_ERROR_ENC(sde_enc,
+				"input_handle is active. unexpected\n");
+		return -EINVAL;
+	}
+
+	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
+	if (!input_handler)
+		return -ENOMEM;
+
+	input_handler->event = sde_encoder_input_event_handler;
+	input_handler->connect = _sde_encoder_input_connect;
+	input_handler->disconnect = _sde_encoder_input_disconnect;
+	input_handler->name = "sde";
+	input_handler->id_table = sde_input_ids;
+	input_handler->private = sde_enc;
+
+	sde_enc->input_handler = input_handler;
+
+	return rc;
+}
+
 static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
@@ -2584,12 +2689,14 @@
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
 	struct msm_mode_info mode_info;
+	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
 
 	if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
 		SDE_ERROR("power resource is not enabled\n");
@@ -2627,6 +2734,14 @@
 		return;
 	}
 
+	if (sde_enc->input_handler) {
+		ret = _sde_encoder_input_handler_register(
+				sde_enc->input_handler);
+		if (ret)
+			SDE_ERROR(
+			"input handler registration failed, rc = %d\n", ret);
+	}
+
 	ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
 	if (ret) {
 		SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n",
@@ -2705,6 +2820,11 @@
 	/* wait for idle */
 	sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
+	kthread_flush_work(&sde_enc->input_event_work);
+
+	if (sde_enc->input_handler)
+		input_unregister_handler(sde_enc->input_handler);
+
 	/*
 	 * For primary command mode encoders, execute the resource control
 	 * pre-stop operations before the physical encoders are disabled, to
@@ -3633,102 +3753,6 @@
 			SDE_ENC_RC_EVENT_EARLY_WAKEUP);
 }
 
-static int _sde_encoder_input_connect(struct input_handler *handler,
-	struct input_dev *dev, const struct input_device_id *id)
-{
-	struct input_handle *handle;
-	int rc = 0;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	handle->dev = dev;
-	handle->handler = handler;
-	handle->name = handler->name;
-
-	rc = input_register_handle(handle);
-	if (rc) {
-		pr_err("failed to register input handle\n");
-		goto error;
-	}
-
-	rc = input_open_device(handle);
-	if (rc) {
-		pr_err("failed to open input device\n");
-		goto error_unregister;
-	}
-
-	return 0;
-
-error_unregister:
-	input_unregister_handle(handle);
-
-error:
-	kfree(handle);
-
-	return rc;
-}
-
-static void _sde_encoder_input_disconnect(struct input_handle *handle)
-{
-	 input_close_device(handle);
-	 input_unregister_handle(handle);
-	 kfree(handle);
-}
-
-/**
- * Structure for specifying event parameters on which to receive callbacks.
- * This structure will trigger a callback in case of a touch event (specified by
- * EV_ABS) where there is a change in X and Y coordinates,
- */
-static const struct input_device_id sde_input_ids[] = {
-	{
-		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
-		.evbit = { BIT_MASK(EV_ABS) },
-		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
-					BIT_MASK(ABS_MT_POSITION_X) |
-					BIT_MASK(ABS_MT_POSITION_Y) },
-	},
-	{ },
-};
-
-static int _sde_encoder_input_handler(
-		struct sde_encoder_virt *sde_enc)
-{
-	struct input_handler *input_handler = NULL;
-	int rc = 0;
-
-	if (sde_enc->input_handler) {
-		SDE_ERROR_ENC(sde_enc,
-				"input_handle is active. unexpected\n");
-		return -EINVAL;
-	}
-
-	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
-	if (!input_handler)
-		return -ENOMEM;
-
-	input_handler->event = sde_encoder_input_event_handler;
-	input_handler->connect = _sde_encoder_input_connect;
-	input_handler->disconnect = _sde_encoder_input_disconnect;
-	input_handler->name = "sde";
-	input_handler->id_table = sde_input_ids;
-	input_handler->private = sde_enc;
-
-	rc = input_register_handler(input_handler);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc,
-			"input_register_handler failed, rc= %d\n", rc);
-		kfree(input_handler);
-		return rc;
-	}
-
-	sde_enc->input_handler = input_handler;
-
-	return rc;
-}
-
 static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
 {
 	struct sde_encoder_virt *sde_enc = container_of(work,
@@ -3986,6 +4010,7 @@
 	}
 
 	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+			sde_enc->disp_info.is_primary &&
 			!_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
 		SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
 		mod_timer(&sde_enc->vsync_event_timer,
@@ -4878,7 +4903,7 @@
 
 int sde_encoder_display_failure_notification(struct drm_encoder *enc)
 {
-	struct msm_drm_thread *disp_thread = NULL;
+	struct msm_drm_thread *event_thread = NULL;
 	struct msm_drm_private *priv = NULL;
 	struct sde_encoder_virt *sde_enc = NULL;
 
@@ -4890,7 +4915,7 @@
 	priv = enc->dev->dev_private;
 	sde_enc = to_sde_encoder_virt(enc);
 	if (!sde_enc->crtc || (sde_enc->crtc->index
-			>= ARRAY_SIZE(priv->disp_thread))) {
+			>= ARRAY_SIZE(priv->event_thread))) {
 		SDE_DEBUG_ENC(sde_enc,
 			"invalid cached CRTC: %d or crtc index: %d\n",
 			sde_enc->crtc == NULL,
@@ -4900,15 +4925,12 @@
 
 	SDE_EVT32_VERBOSE(DRMID(enc));
 
-	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
-	if (current->tgid == disp_thread->thread->tgid) {
-		sde_encoder_resource_control(&sde_enc->base,
-					     SDE_ENC_RC_EVENT_KICKOFF);
-	} else {
-		kthread_queue_work(&disp_thread->worker,
-				   &sde_enc->esd_trigger_work);
-		kthread_flush_work(&sde_enc->esd_trigger_work);
-	}
+	event_thread = &priv->event_thread[sde_enc->crtc->index];
+
+	kthread_queue_work(&event_thread->worker,
+			   &sde_enc->esd_trigger_work);
+	kthread_flush_work(&sde_enc->esd_trigger_work);
+
 	/**
 	 * panel may stop generating te signal (vsync) during esd failure. rsc
 	 * hardware may hang without vsync. Avoid rsc hang by generating the
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index bad608d..82dd64a 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -462,7 +462,7 @@
 	phys_enc->in_clone_mode = false;
 
 	/* Check if WB has CWB support */
-	if (!(wb_cfg->features & SDE_WB_HAS_CWB))
+	if (!(wb_cfg->features & BIT(SDE_WB_HAS_CWB)))
 		return;
 
 	/* Count the number of connectors on the given crtc */
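The one-line fix above matters because SDE_WB_HAS_CWB is a feature-bit index, not a mask, so the old test checked the wrong bit. A stand-alone sketch of the difference (the index value 2 is hypothetical, chosen only for illustration):

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define SDE_WB_HAS_CWB	2UL	/* hypothetical feature-bit index */

int main(void)
{
	unsigned long features = BIT(SDE_WB_HAS_CWB);	/* CWB supported */

	/* old test masks with the index itself and misses the feature */
	printf("features & SDE_WB_HAS_CWB      = %lu\n",
	       features & SDE_WB_HAS_CWB);
	/* fixed test masks with BIT(index) and detects it */
	printf("features & BIT(SDE_WB_HAS_CWB) = %lu\n",
	       features & BIT(SDE_WB_HAS_CWB));
	return 0;
}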
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 1f30a5c..8ffbb98 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -431,3 +431,55 @@
 		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
 		ctx->commit_count);
 }
+
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s)
+{
+	/* keep timeline_str printable even if no timeline_value_str op */
+	char timeline_str[TIMELINE_VAL_LENGTH] = {0};
+
+	if (fence->ops->timeline_value_str)
+		fence->ops->timeline_value_str(fence,
+		timeline_str, TIMELINE_VAL_LENGTH);
+
+	seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
+		fence->ops->get_driver_name(fence),
+		fence->ops->get_timeline_name(fence),
+		fence->seqno, timeline_str,
+		fence->ops->signaled ?
+		fence->ops->signaled(fence) : 0xffffffff);
+}
+
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj, struct seq_file **s)
+{
+	char *obj_name;
+	struct sde_fence *fc, *next;
+	struct fence *fence;
+
+	if (!ctx || !drm_obj) {
+		SDE_ERROR("invalid input params\n");
+		return;
+	}
+
+	switch (drm_obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+		obj_name = "crtc";
+		break;
+	case DRM_MODE_OBJECT_CONNECTOR:
+		obj_name = "connector";
+		break;
+	default:
+		obj_name = "unknown";
+		break;
+	}
+
+	seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
+		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
+		ctx->commit_count);
+
+	spin_lock(&ctx->list_lock);
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
+		fence = &fc->base;
+		sde_fence_list_dump(fence, s);
+	}
+	spin_unlock(&ctx->list_lock);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 7891be4..7d7fd02 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -153,6 +153,22 @@
 void sde_fence_timeline_status(struct sde_fence_context *ctx,
 					struct drm_mode_object *drm_obj);
 
+/**
+ * sde_debugfs_timeline_dump - dump timeline and fence list info to a debugfs node
+ * @ctx: Pointer to fence context
+ * @drm_obj: Pointer to drm object associated with the fence timeline
+ * @s: Pointer to the seq_file handle used for writing to the debugfs node
+ */
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj, struct seq_file **s);
+
+/**
+ * sde_fence_list_dump - dump info for a single fence to a debugfs node
+ * @fence: Pointer to fence container
+ * @s: Pointer to the seq_file handle used for writing to the debugfs node
+ */
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s);
+
 #else
 static inline void *sde_sync_get(uint64_t fd)
 {
@@ -212,6 +228,18 @@
 {
 	/* do nothing */
 }
+
+static inline void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj, struct seq_file **s)
+{
+	/* do nothing */
+}
+
+static inline void sde_fence_list_dump(struct fence *fence, struct seq_file **s)
+{
+	/* do nothing */
+}
+
 #endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
 #endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 475f8d0..baec526 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -249,6 +249,7 @@
 	DITHER_OFF,
 	DITHER_LEN,
 	DITHER_VER,
+	TE_SOURCE,
 	PP_PROP_MAX,
 };
 
@@ -589,6 +590,7 @@
 	{DITHER_OFF, "qcom,sde-dither-off", false, PROP_TYPE_U32_ARRAY},
 	{DITHER_LEN, "qcom,sde-dither-size", false, PROP_TYPE_U32},
 	{DITHER_VER, "qcom,sde-dither-version", false, PROP_TYPE_U32},
+	{TE_SOURCE, "qcom,sde-te-source", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type dsc_prop[] = {
@@ -2628,6 +2630,10 @@
 		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
 				pp->id - PINGPONG_0);
 		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
+		pp->te_source = PROP_VALUE_ACCESS(prop_value, TE_SOURCE, i);
+		if (!prop_exists[TE_SOURCE] ||
+			pp->te_source > SDE_VSYNC_SOURCE_WD_TIMER_0)
+			pp->te_source = SDE_VSYNC0_SOURCE_GPIO;
 
 		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
 		sblk->te.id = SDE_PINGPONG_TE;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 65919e9..52bdc78 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -653,6 +653,7 @@
  */
 struct sde_pingpong_cfg  {
 	SDE_HW_BLK_INFO;
+	u32 te_source;
 	const struct sde_pingpong_sub_blks *sblk;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index 9e64d78..c7989cd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -680,7 +680,7 @@
 	void  __iomem *base;
 
 	if (!hw_cfg  || (hw_cfg->len != sizeof(*pcc)  && hw_cfg->payload)) {
-		DRM_ERROR("invalid params hw %p payload %p payloadsize %d \"\
+		DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d \"\
 			  exp size %zd\n",
 			   hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
 			   ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index b0a52a7..c2fffef 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2151,8 +2151,8 @@
 		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
 			ARRAY_SIZE(iommu_ports));
 
-		msm_gem_aspace_domain_attach_detach_update(aspace, false);
 		aspace->domain_attached = true;
+		msm_gem_aspace_domain_attach_detach_update(aspace, false);
 	}
 
 	return 0;
@@ -2664,21 +2664,22 @@
 
 	mutex_lock(&dev->mode_config.mutex);
 	connector_list = &dev->mode_config.connector_list;
-	list_for_each_entry(conn_iter, connector_list, head) {
-		/**
-		 * SDE_KMS doesn't attach more than one encoder to
-		 * a DSI connector. So it is safe to check only with the
-		 * first encoder entry. Revisit this logic if we ever have
-		 * to support continuous splash for external displays in MST
-		 * configuration.
-		 */
-		if (conn_iter &&
-			(conn_iter->encoder_ids[0] == encoder->base.id)) {
-			connector = conn_iter;
-			break;
+	if (connector_list) {
+		list_for_each_entry(conn_iter, connector_list, head) {
+			/**
+			 * SDE_KMS doesn't attach more than one encoder to
+			 * a DSI connector. So it is safe to check only with
+			 * the first encoder entry. Revisit this logic if we
+			 * ever have to support continuous splash for
+			 * external displays in MST configuration.
+			 */
+			if (conn_iter &&
+			  (conn_iter->encoder_ids[0] == encoder->base.id)) {
+				connector = conn_iter;
+				break;
+			}
 		}
 	}
-
 	if (!connector) {
 		SDE_ERROR("connector not initialized\n");
 		mutex_unlock(&dev->mode_config.mutex);
@@ -3195,7 +3196,7 @@
 		sde_kms->mmio = NULL;
 		goto error;
 	}
-	DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
+	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
 	sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");
 
 	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 2e46599..dc55ad5 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -3449,6 +3449,25 @@
 				src_w, src_h);
 			return -EINVAL;
 		}
+
+		/*
+		 * SSPP fetch, unpack output and QSEED3 input lines need
+		 * to match for the Y plane.
+		 */
+		if (i == 0 &&
+			(sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
+			BIT(SDE_DRM_DEINTERLACE)) &&
+			((pstate->scaler3_cfg.src_height[i] != (src_h/2)) ||
+			(pstate->pixel_ext.roi_h[i] != (src_h/2)))) {
+			SDE_ERROR_PLANE(psde,
+				"de-interlace fail roi[%d] %d/%d, src %dx%d, src %dx%d\n",
+				i, pstate->pixel_ext.roi_w[i],
+				pstate->pixel_ext.roi_h[i],
+				pstate->scaler3_cfg.src_width[i],
+				pstate->scaler3_cfg.src_height[i],
+				src_w, src_h);
+			return -EINVAL;
+		}
 	}
 
 	pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2;
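The de-interlace check added above requires that, for the Y plane, both the QSEED3 source height and the pixel-extension ROI height equal half of the plane source height. A small stand-alone sketch of that constraint with hypothetical dimensions (the helper below is illustrative, not driver code):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Y-plane rule when SDE_DRM_DEINTERLACE is set: one field = src_h / 2 lines */
static bool deinterlace_y_plane_ok(uint32_t src_h, uint32_t qseed_src_h,
				   uint32_t roi_h)
{
	return qseed_src_h == src_h / 2 && roi_h == src_h / 2;
}

int main(void)
{
	/* a 1080-line interlaced source carries 540 lines per field */
	printf("540/540  -> %d\n", deinterlace_y_plane_ok(1080, 540, 540));
	printf("1080/540 -> %d\n", deinterlace_y_plane_ok(1080, 1080, 540));
	return 0;
}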
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 71c8b63..61588bd9 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -816,6 +816,7 @@
 	.driver = {
 		.name = "sde_wb",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 5829095..758cc53 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -169,6 +169,11 @@
 	struct vbif_debug_bus_entry *entries;
 };
 
+struct sde_dbg_dsi_debug_bus {
+	u32 *entries;
+	u32 size;
+};
+
 /**
  * struct sde_dbg_base - global sde debug base structure
  * @evtlog: event log instance
@@ -202,6 +207,7 @@
 
 	struct sde_dbg_sde_debug_bus dbgbus_sde;
 	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
+	struct sde_dbg_dsi_debug_bus dbgbus_dsi;
 	bool dump_all;
 	bool dsi_dbg_bus;
 	u32 debugfs_ctrl;
@@ -2038,6 +2044,42 @@
 	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
 };
 
+static u32 dsi_dbg_bus_sdm845[] = {
+	0x0001, 0x1001, 0x0001, 0x0011,
+	0x1021, 0x0021, 0x0031, 0x0041,
+	0x0051, 0x0061, 0x3061, 0x0061,
+	0x2061, 0x2061, 0x1061, 0x1061,
+	0x1061, 0x0071, 0x0071, 0x0071,
+	0x0081, 0x0081, 0x00A1, 0x00A1,
+	0x10A1, 0x20A1, 0x30A1, 0x10A1,
+	0x10A1, 0x30A1, 0x20A1, 0x00B1,
+	0x00C1, 0x00C1, 0x10C1, 0x20C1,
+	0x30C1, 0x00D1, 0x00D1, 0x20D1,
+	0x30D1, 0x00E1, 0x00E1, 0x00E1,
+	0x00F1, 0x00F1, 0x0101, 0x0101,
+	0x1101, 0x2101, 0x3101, 0x0111,
+	0x0141, 0x1141, 0x0141, 0x1141,
+	0x1141, 0x0151, 0x0151, 0x1151,
+	0x2151, 0x3151, 0x0161, 0x0161,
+	0x1161, 0x0171, 0x0171, 0x0181,
+	0x0181, 0x0191, 0x0191, 0x01A1,
+	0x01A1, 0x01B1, 0x01B1, 0x11B1,
+	0x21B1, 0x01C1, 0x01C1, 0x11C1,
+	0x21C1, 0x31C1, 0x01D1, 0x01D1,
+	0x01D1, 0x01D1, 0x11D1, 0x21D1,
+	0x21D1, 0x01E1, 0x01E1, 0x01F1,
+	0x01F1, 0x0201, 0x0201, 0x0211,
+	0x0221, 0x0231, 0x0241, 0x0251,
+	0x0281, 0x0291, 0x0281, 0x0291,
+	0x02A1, 0x02B1, 0x02C1, 0x0321,
+	0x0321, 0x1321, 0x2321, 0x3321,
+	0x0331, 0x0331, 0x1331, 0x0341,
+	0x0341, 0x1341, 0x2341, 0x3341,
+	0x0351, 0x0361, 0x0361, 0x1361,
+	0x2361, 0x0371, 0x0381, 0x0391,
+	0x03C1, 0x03D1, 0x03E1, 0x03F1,
+};
+
 /**
  * _sde_dbg_enable_power - use callback to turn power on for hw register access
  * @enable: whether to turn power on or off
@@ -2603,7 +2645,8 @@
 		_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
 
 	if (sde_dbg_base.dsi_dbg_bus || dump_all)
-		dsi_ctrl_debug_dump();
+		dsi_ctrl_debug_dump(sde_dbg_base.dbgbus_dsi.entries,
+				    sde_dbg_base.dbgbus_dsi.size);
 
 	if (do_panic && sde_dbg_base.panic_on_err)
 		panic(name);
@@ -3355,6 +3398,8 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		dbg->dbgbus_dsi.entries = NULL;
+		dbg->dbgbus_dsi.size = 0;
 	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
 		dbg->dbgbus_sde.entries = dbg_bus_sde_sdm845;
 		dbg->dbgbus_sde.cmn.entries_size =
@@ -3365,6 +3410,8 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		dbg->dbgbus_dsi.entries = dsi_dbg_bus_sdm845;
+		dbg->dbgbus_dsi.size = ARRAY_SIZE(dsi_dbg_bus_sdm845);
 	} else {
 		pr_err("unsupported chipset id %X\n", hwversion);
 	}
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 9efb893..2921a38 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -344,8 +344,10 @@
 
 /**
  * dsi_ctrl_debug_dump - dump dsi debug dump status
+ * @entries:	array of debug bus control values
+ * @size:	size of the debug bus control array
  */
-void dsi_ctrl_debug_dump(void);
+void dsi_ctrl_debug_dump(u32 *entries, u32 size);
 
 #else
 static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
@@ -437,7 +439,7 @@
 {
 }
 
-static inline void dsi_ctrl_debug_dump(void)
+static inline void dsi_ctrl_debug_dump(u32 *entries, u32 size)
 {
 }
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 8179b10..721e278 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -579,8 +579,12 @@
 			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
 		if (!rc) {
 			pr_err("Timeout waiting for vsync\n");
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
+			rc = -ETIMEDOUT;
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
 				SDE_EVTLOG_ERROR);
+		} else {
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
+			rc = 0;
 		}
 	}
 end:
@@ -635,8 +639,12 @@
 			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
 		if (!rc) {
 			pr_err("Timeout waiting for vsync\n");
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
+			rc = -ETIMEDOUT;
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
 				SDE_EVTLOG_ERROR);
+		} else {
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
+			rc = 0;
 		}
 	}
 
@@ -1446,6 +1454,7 @@
 	.driver     = {
 		.name   = "sde_rsc",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index e2faccf..d66e0e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x00000756,
-	0x00000748,
+	0x00000754,
+	0x00000746,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x0000075a,
 	0x00000758,
+	0x00000756,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000b8a,
-	0x00000a2d,
+	0x00000b88,
+	0x00000a2b,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000bb3,
-	0x00000b8c,
+	0x00000bb1,
+	0x00000b8a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000bbf,
 	0x00000bbd,
+	0x00000bbb,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -236,19 +236,19 @@
 	0x000005d3,
 	0x00000003,
 	0x00000002,
-	0x0000069d,
+	0x0000069b,
 	0x00040004,
 	0x00000000,
-	0x000006b9,
+	0x000006b7,
 	0x00010005,
 	0x00000000,
-	0x000006d6,
+	0x000006d4,
 	0x00010006,
 	0x00000000,
 	0x0000065b,
 	0x00000007,
 	0x00000000,
-	0x000006e1,
+	0x000006df,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -1372,432 +1372,432 @@
 /* 0x065b: memx_func_wait_vblank */
 	0x9800f840,
 	0x66b00016,
-	0x130bf400,
+	0x120bf400,
 	0xf40166b0,
 	0x0ef4060b,
 /* 0x066d: memx_func_wait_vblank_head1 */
-	0x2077f12e,
-	0x070ef400,
-/* 0x0674: memx_func_wait_vblank_head0 */
-	0x000877f1,
-/* 0x0678: memx_func_wait_vblank_0 */
-	0x07c467f1,
-	0xcf0664b6,
-	0x67fd0066,
-	0xf31bf404,
-/* 0x0688: memx_func_wait_vblank_1 */
-	0x07c467f1,
-	0xcf0664b6,
-	0x67fd0066,
-	0xf30bf404,
-/* 0x0698: memx_func_wait_vblank_fini */
-	0xf80410b6,
-/* 0x069d: memx_func_wr32 */
-	0x00169800,
-	0xb6011598,
-	0x60f90810,
-	0xd0fc50f9,
-	0x21f4e0fc,
-	0x0242b640,
-	0xf8e91bf4,
-/* 0x06b9: memx_func_wait */
-	0x2c87f000,
-	0xcf0684b6,
-	0x1e980088,
-	0x011d9800,
-	0x98021c98,
-	0x10b6031b,
-	0xa321f410,
-/* 0x06d6: memx_func_delay */
-	0x1e9800f8,
-	0x0410b600,
-	0xf87e21f4,
-/* 0x06e1: memx_func_train */
-/* 0x06e3: memx_exec */
-	0xf900f800,
-	0xb9d0f9e0,
-	0xb2b902c1,
-/* 0x06ed: memx_exec_next */
-	0x00139802,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12b855f9,
-	0xe41ef406,
-	0x98f10b98,
-	0xcbbbf20c,
-	0xc4b7f102,
-	0x06b4b607,
-	0xfc00bbcf,
-	0xf5e0fcd0,
-	0xf8033621,
-/* 0x0729: memx_info */
-	0x01c67000,
-/* 0x072f: memx_info_data */
-	0xf10e0bf4,
-	0xf103ccc7,
-	0xf40800b7,
-/* 0x073a: memx_info_train */
-	0xc7f10b0e,
-	0xb7f10bcc,
-/* 0x0742: memx_info_send */
-	0x21f50100,
-	0x00f80336,
-/* 0x0748: memx_recv */
-	0xf401d6b0,
-	0xd6b0980b,
-	0xd80bf400,
-/* 0x0756: memx_init */
+	0x2077f02c,
+/* 0x0673: memx_func_wait_vblank_head0 */
+	0xf0060ef4,
+/* 0x0676: memx_func_wait_vblank_0 */
+	0x67f10877,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x0686: memx_func_wait_vblank_1 */
+	0x67f1f31b,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x0696: memx_func_wait_vblank_fini */
+	0x10b6f30b,
+/* 0x069b: memx_func_wr32 */
+	0x9800f804,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0xb64021f4,
+	0x1bf40242,
+/* 0x06b7: memx_func_wait */
+	0xf000f8e9,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f8a321,
+/* 0x06d4: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x06df: memx_func_train */
+	0xf800f87e,
+/* 0x06e1: memx_exec */
+	0xf9e0f900,
+	0x02c1b9d0,
+/* 0x06eb: memx_exec_next */
+	0x9802b2b9,
+	0x10b60013,
+	0xf034e704,
+	0xe033e701,
+	0x0132b601,
+	0x980c30f0,
+	0x55f9de35,
+	0xf40612b8,
+	0x0b98e41e,
+	0xf20c98f1,
+	0xf102cbbb,
+	0xb607c4b7,
+	0xbbcf06b4,
+	0xfcd0fc00,
+	0x3621f5e0,
+/* 0x0727: memx_info */
+	0x7000f803,
+	0x0bf401c6,
+/* 0x072d: memx_info_data */
+	0xccc7f10e,
+	0x00b7f103,
+	0x0b0ef408,
+/* 0x0738: memx_info_train */
+	0x0bccc7f1,
+	0x0100b7f1,
+/* 0x0740: memx_info_send */
+	0x033621f5,
+/* 0x0746: memx_recv */
+	0xd6b000f8,
+	0x980bf401,
+	0xf400d6b0,
+	0x00f8d80b,
+/* 0x0754: memx_init */
+/* 0x0756: perf_recv */
 	0x00f800f8,
-/* 0x0758: perf_recv */
-/* 0x075a: perf_init */
-	0x00f800f8,
-/* 0x075c: i2c_drive_scl */
-	0xf40036b0,
-	0x07f1110b,
-	0x04b607e0,
-	0x0001d006,
-	0x00f804bd,
-/* 0x0770: i2c_drive_scl_lo */
-	0x07e407f1,
-	0xd00604b6,
-	0x04bd0001,
-/* 0x077e: i2c_drive_sda */
+/* 0x0758: perf_init */
+/* 0x075a: i2c_drive_scl */
 	0x36b000f8,
 	0x110bf400,
 	0x07e007f1,
 	0xd00604b6,
-	0x04bd0002,
-/* 0x0792: i2c_drive_sda_lo */
+	0x04bd0001,
+/* 0x076e: i2c_drive_scl_lo */
 	0x07f100f8,
 	0x04b607e4,
+	0x0001d006,
+	0x00f804bd,
+/* 0x077c: i2c_drive_sda */
+	0xf40036b0,
+	0x07f1110b,
+	0x04b607e0,
 	0x0002d006,
 	0x00f804bd,
-/* 0x07a0: i2c_sense_scl */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0431fd00,
-	0xf4060bf4,
-/* 0x07b6: i2c_sense_scl_done */
-	0x00f80131,
-/* 0x07b8: i2c_sense_sda */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0432fd00,
-	0xf4060bf4,
-/* 0x07ce: i2c_sense_sda_done */
-	0x00f80131,
-/* 0x07d0: i2c_raise_scl */
-	0x47f140f9,
-	0x37f00898,
-	0x5c21f501,
-/* 0x07dd: i2c_raise_scl_wait */
+/* 0x0790: i2c_drive_sda_lo */
+	0x07e407f1,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x079e: i2c_sense_scl */
+	0x32f400f8,
+	0xc437f101,
+	0x0634b607,
+	0xfd0033cf,
+	0x0bf40431,
+	0x0131f406,
+/* 0x07b4: i2c_sense_scl_done */
+/* 0x07b6: i2c_sense_sda */
+	0x32f400f8,
+	0xc437f101,
+	0x0634b607,
+	0xfd0033cf,
+	0x0bf40432,
+	0x0131f406,
+/* 0x07cc: i2c_sense_sda_done */
+/* 0x07ce: i2c_raise_scl */
+	0x40f900f8,
+	0x089847f1,
+	0xf50137f0,
+/* 0x07db: i2c_raise_scl_wait */
+	0xf1075a21,
+	0xf403e8e7,
+	0x21f57e21,
+	0x01f4079e,
+	0x0142b609,
+/* 0x07ef: i2c_raise_scl_done */
+	0xfcef1bf4,
+/* 0x07f3: i2c_start */
+	0xf500f840,
+	0xf4079e21,
+	0x21f50d11,
+	0x11f407b6,
+	0x300ef406,
+/* 0x0804: i2c_start_rep */
+	0xf50037f0,
+	0xf0075a21,
+	0x21f50137,
+	0x76bb077c,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb607ce21,
+	0x11f40464,
+/* 0x0831: i2c_start_send */
+	0x0037f01f,
+	0x077c21f5,
+	0x1388e7f1,
+	0xf07e21f4,
+	0x21f50037,
+	0xe7f1075a,
+	0x21f41388,
+/* 0x084d: i2c_start_out */
+/* 0x084f: i2c_stop */
+	0xf000f87e,
+	0x21f50037,
+	0x37f0075a,
+	0x7c21f500,
 	0xe8e7f107,
 	0x7e21f403,
-	0x07a021f5,
-	0xb60901f4,
-	0x1bf40142,
-/* 0x07f1: i2c_raise_scl_done */
-	0xf840fcef,
-/* 0x07f5: i2c_start */
-	0xa021f500,
-	0x0d11f407,
-	0x07b821f5,
-	0xf40611f4,
-/* 0x0806: i2c_start_rep */
-	0x37f0300e,
-	0x5c21f500,
-	0x0137f007,
-	0x077e21f5,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xd021f550,
-	0x0464b607,
-/* 0x0833: i2c_start_send */
-	0xf01f11f4,
-	0x21f50037,
-	0xe7f1077e,
-	0x21f41388,
-	0x0037f07e,
-	0x075c21f5,
-	0x1388e7f1,
-/* 0x084f: i2c_start_out */
-	0xf87e21f4,
-/* 0x0851: i2c_stop */
-	0x0037f000,
-	0x075c21f5,
-	0xf50037f0,
-	0xf1077e21,
-	0xf403e8e7,
-	0x37f07e21,
-	0x5c21f501,
-	0x88e7f107,
-	0x7e21f413,
 	0xf50137f0,
-	0xf1077e21,
+	0xf1075a21,
 	0xf41388e7,
-	0x00f87e21,
-/* 0x0884: i2c_bitw */
-	0x077e21f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07d021f5,
-	0xf40464b6,
-	0xe7f11811,
-	0x21f41388,
-	0x0037f07e,
-	0x075c21f5,
-	0x1388e7f1,
-/* 0x08c3: i2c_bitw_out */
-	0xf87e21f4,
-/* 0x08c5: i2c_bitr */
-	0x0137f000,
-	0x077e21f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07d021f5,
-	0xf40464b6,
-	0x21f51b11,
-	0x37f007b8,
-	0x5c21f500,
+	0x37f07e21,
+	0x7c21f501,
 	0x88e7f107,
 	0x7e21f413,
-	0xf4013cf0,
-/* 0x090a: i2c_bitr_done */
-	0x00f80131,
-/* 0x090c: i2c_get_byte */
-	0xf00057f0,
-/* 0x0912: i2c_get_byte_next */
-	0x54b60847,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b608c5,
-	0x2b11f404,
-	0xb60553fd,
-	0x1bf40142,
-	0x0137f0d8,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x8421f550,
-	0x0464b608,
-/* 0x095c: i2c_get_byte_done */
-/* 0x095e: i2c_put_byte */
-	0x47f000f8,
-/* 0x0961: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x088421f5,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xc521f550,
-	0x0464b608,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x09b7: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x09b9: i2c_addr */
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b607f5,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6095e21,
-/* 0x09fe: i2c_addr_done */
-	0x00f80464,
-/* 0x0a00: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b702e4,
-	0xee980d1c,
-/* 0x0a0f: i2c_acquire */
-	0xf500f800,
-	0xf40a0021,
-	0xd9f00421,
-	0x4021f403,
-/* 0x0a1e: i2c_release */
+/* 0x0882: i2c_bitw */
 	0x21f500f8,
-	0x21f40a00,
-	0x03daf004,
-	0xf84021f4,
-/* 0x0a2d: i2c_recv */
-	0x0132f400,
-	0xb6f8c1c7,
-	0x16b00214,
-	0x3a1ff528,
-	0xf413a001,
-	0x0032980c,
-	0x0ccc13a0,
-	0xf4003198,
-	0xd0f90231,
-	0xd0f9e0f9,
-	0x000067f1,
-	0x100063f1,
-	0xbb016792,
+	0xe7f1077c,
+	0x21f403e8,
+	0x0076bb7e,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b607ce,
+	0x1811f404,
+	0x1388e7f1,
+	0xf07e21f4,
+	0x21f50037,
+	0xe7f1075a,
+	0x21f41388,
+/* 0x08c1: i2c_bitw_out */
+/* 0x08c3: i2c_bitr */
+	0xf000f87e,
+	0x21f50137,
+	0xe7f1077c,
+	0x21f403e8,
+	0x0076bb7e,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b607ce,
+	0x1b11f404,
+	0x07b621f5,
+	0xf50037f0,
+	0xf1075a21,
+	0xf41388e7,
+	0x3cf07e21,
+	0x0131f401,
+/* 0x0908: i2c_bitr_done */
+/* 0x090a: i2c_get_byte */
+	0x57f000f8,
+	0x0847f000,
+/* 0x0910: i2c_get_byte_next */
+	0xbb0154b6,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0a0f21f5,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b31bf5,
-	0xbb0057f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x09b921f5,
-	0xf50464b6,
-	0xc700d011,
-	0x76bbe0c5,
+	0x08c321f5,
+	0xf40464b6,
+	0x53fd2b11,
+	0x0142b605,
+	0xf0d81bf4,
+	0x76bb0137,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb6095e21,
-	0x11f50464,
-	0x57f000ad,
+	0xb6088221,
+/* 0x095a: i2c_get_byte_done */
+	0x00f80464,
+/* 0x095c: i2c_put_byte */
+/* 0x095f: i2c_put_byte_next */
+	0xb60847f0,
+	0x54ff0142,
+	0x0076bb38,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60882,
+	0x3411f404,
+	0xf40046b0,
+	0x76bbd81b,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb608c321,
+	0x11f40464,
+	0x0076bb0f,
+	0xf40136b0,
+	0x32f4061b,
+/* 0x09b5: i2c_put_byte_done */
+/* 0x09b7: i2c_addr */
+	0xbb00f801,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07f321f5,
+	0xf40464b6,
+	0xc3e72911,
+	0x34b6012e,
+	0x0553fd01,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x5c21f550,
+	0x0464b609,
+/* 0x09fc: i2c_addr_done */
+/* 0x09fe: i2c_acquire_addr */
+	0xcec700f8,
+	0x02e4b6f8,
+	0x0d1ce0b7,
+	0xf800ee98,
+/* 0x0a0d: i2c_acquire */
+	0xfe21f500,
+	0x0421f409,
+	0xf403d9f0,
+	0x00f84021,
+/* 0x0a1c: i2c_release */
+	0x09fe21f5,
+	0xf00421f4,
+	0x21f403da,
+/* 0x0a2b: i2c_recv */
+	0xf400f840,
+	0xc1c70132,
+	0x0214b6f8,
+	0xf52816b0,
+	0xa0013a1f,
+	0x980cf413,
+	0x13a00032,
+	0x31980ccc,
+	0x0231f400,
+	0xe0f9d0f9,
+	0x67f1d0f9,
+	0x63f10000,
+	0x67921000,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b609b9,
-	0x8a11f504,
+	0x64b60a0d,
+	0xb0d0fc04,
+	0x1bf500d6,
+	0x57f000b3,
 	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b6090c,
-	0x6a11f404,
-	0xbbe05bcb,
+	0x64b609b7,
+	0xd011f504,
+	0xe0c5c700,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x5c21f550,
+	0x0464b609,
+	0x00ad11f5,
+	0xbb0157f0,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x085121f5,
-	0xb90464b6,
-	0x74bd025b,
-/* 0x0b33: i2c_recv_not_rd08 */
-	0xb0430ef4,
-	0x1bf401d6,
-	0x0057f03d,
-	0x09b921f5,
-	0xc73311f4,
-	0x21f5e0c5,
-	0x11f4095e,
-	0x0057f029,
-	0x09b921f5,
-	0xc71f11f4,
-	0x21f5e0b5,
-	0x11f4095e,
-	0x5121f515,
-	0xc774bd08,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0b73: i2c_recv_not_wr08 */
-/* 0x0b73: i2c_recv_done */
-	0xc7030ef4,
-	0x21f5f8ce,
-	0xe0fc0a1e,
-	0x12f4d0fc,
-	0x027cb90a,
-	0x033621f5,
-/* 0x0b88: i2c_recv_exit */
-/* 0x0b8a: i2c_init */
-	0x00f800f8,
-/* 0x0b8c: test_recv */
-	0x05d817f1,
-	0xcf0614b6,
-	0x10b60011,
-	0xd807f101,
-	0x0604b605,
-	0xbd0001d0,
-	0x00e7f104,
-	0x4fe3f1d9,
-	0x5621f513,
-/* 0x0bb3: test_init */
-	0xf100f802,
-	0xf50800e7,
-	0xf8025621,
-/* 0x0bbd: idle_recv */
-/* 0x0bbf: idle */
-	0xf400f800,
-	0x17f10031,
-	0x14b605d4,
+	0x09b721f5,
+	0xf50464b6,
+	0xbb008a11,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x090a21f5,
+	0xf40464b6,
+	0x5bcb6a11,
+	0x0076bbe0,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b6084f,
+	0x025bb904,
+	0x0ef474bd,
+/* 0x0b31: i2c_recv_not_rd08 */
+	0x01d6b043,
+	0xf03d1bf4,
+	0x21f50057,
+	0x11f409b7,
+	0xe0c5c733,
+	0x095c21f5,
+	0xf02911f4,
+	0x21f50057,
+	0x11f409b7,
+	0xe0b5c71f,
+	0x095c21f5,
+	0xf51511f4,
+	0xbd084f21,
+	0x08c5c774,
+	0xf4091bf4,
+	0x0ef40232,
+/* 0x0b71: i2c_recv_not_wr08 */
+/* 0x0b71: i2c_recv_done */
+	0xf8cec703,
+	0x0a1c21f5,
+	0xd0fce0fc,
+	0xb90a12f4,
+	0x21f5027c,
+/* 0x0b86: i2c_recv_exit */
+	0x00f80336,
+/* 0x0b88: i2c_init */
+/* 0x0b8a: test_recv */
+	0x17f100f8,
+	0x14b605d8,
 	0x0011cf06,
 	0xf10110b6,
-	0xb605d407,
+	0xb605d807,
 	0x01d00604,
-/* 0x0bdb: idle_loop */
-	0xf004bd00,
-	0x32f45817,
-/* 0x0be1: idle_proc */
-/* 0x0be1: idle_proc_exec */
-	0xb910f902,
-	0x21f5021e,
-	0x10fc033f,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0bf5: idle_proc_next */
-	0x5810b6ef,
-	0xf4061fb8,
-	0x02f4e61b,
-	0x0028f4dd,
-	0x00bb0ef4,
+	0xf104bd00,
+	0xf1d900e7,
+	0xf5134fe3,
+	0xf8025621,
+/* 0x0bb1: test_init */
+	0x00e7f100,
+	0x5621f508,
+/* 0x0bbb: idle_recv */
+	0xf800f802,
+/* 0x0bbd: idle */
+	0x0031f400,
+	0x05d417f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd407f101,
+	0x0604b605,
+	0xbd0001d0,
+/* 0x0bd9: idle_loop */
+	0x5817f004,
+/* 0x0bdf: idle_proc */
+/* 0x0bdf: idle_proc_exec */
+	0xf90232f4,
+	0x021eb910,
+	0x033f21f5,
+	0x11f410fc,
+	0x0231f409,
+/* 0x0bf3: idle_proc_next */
+	0xb6ef0ef4,
+	0x1fb85810,
+	0xe61bf406,
+	0xf4dd02f4,
+	0x0ef40028,
+	0x000000bb,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 3c731ff..9582224 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x000005f3,
-	0x000005e5,
+	0x000005ee,
+	0x000005e0,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x000005f7,
-	0x000005f5,
+	0x000005f2,
+	0x000005f0,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x000009f8,
-	0x000008a2,
+	0x000009f3,
+	0x0000089d,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000a16,
-	0x000009fa,
+	0x00000a11,
+	0x000009f5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000a21,
-	0x00000a1f,
+	0x00000a1c,
+	0x00000a1a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -233,22 +233,22 @@
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x000004cf,
+	0x000004cc,
 	0x00000003,
 	0x00000002,
-	0x00000546,
+	0x00000541,
 	0x00040004,
 	0x00000000,
-	0x00000563,
+	0x0000055e,
 	0x00010005,
 	0x00000000,
-	0x0000057d,
+	0x00000578,
 	0x00010006,
 	0x00000000,
-	0x00000541,
+	0x0000053c,
 	0x00000007,
 	0x00000000,
-	0x00000589,
+	0x00000584,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -1238,454 +1238,454 @@
 	0x0001f604,
 	0x00f804bd,
 /* 0x045c: memx_func_enter */
-	0x162067f1,
-	0xf55d77f1,
-	0x047e6eb2,
-	0xd8b20000,
-	0xf90487fd,
-	0xfc80f960,
-	0x7ee0fcd0,
-	0x0700002d,
-	0x7e6eb2fe,
+	0x47162046,
+	0x6eb2f55d,
+	0x0000047e,
+	0x87fdd8b2,
+	0xf960f904,
+	0xfcd0fc80,
+	0x002d7ee0,
+	0xb2fe0700,
+	0x00047e6e,
+	0xfdd8b200,
+	0x60f90487,
+	0xd0fc80f9,
+	0x2d7ee0fc,
+	0xf0460000,
+	0x7e6eb226,
 	0xb2000004,
 	0x0487fdd8,
 	0x80f960f9,
 	0xe0fcd0fc,
 	0x00002d7e,
-	0x26f067f1,
-	0x047e6eb2,
-	0xd8b20000,
-	0xf90487fd,
-	0xfc80f960,
-	0x7ee0fcd0,
-	0x0600002d,
-	0x07e04004,
-	0xbd0006f6,
-/* 0x04b9: memx_func_enter_wait */
-	0x07c04604,
-	0xf00066cf,
-	0x0bf40464,
-	0xcf2c06f7,
-	0x06b50066,
-/* 0x04cf: memx_func_leave */
-	0x0600f8f1,
-	0x0066cf2c,
-	0x06f206b5,
-	0x07e44004,
-	0xbd0006f6,
-/* 0x04e1: memx_func_leave_wait */
-	0x07c04604,
-	0xf00066cf,
-	0x1bf40464,
-	0xf067f1f7,
+	0xe0400406,
+	0x0006f607,
+/* 0x04b6: memx_func_enter_wait */
+	0xc04604bd,
+	0x0066cf07,
+	0xf40464f0,
+	0x2c06f70b,
+	0xb50066cf,
+	0x00f8f106,
+/* 0x04cc: memx_func_leave */
+	0x66cf2c06,
+	0xf206b500,
+	0xe4400406,
+	0x0006f607,
+/* 0x04de: memx_func_leave_wait */
+	0xc04604bd,
+	0x0066cf07,
+	0xf40464f0,
+	0xf046f71b,
 	0xb2010726,
 	0x00047e6e,
 	0xfdd8b200,
 	0x60f90587,
 	0xd0fc80f9,
 	0x2d7ee0fc,
-	0x67f10000,
-	0x6eb21620,
-	0x0000047e,
-	0x87fdd8b2,
-	0xf960f905,
-	0xfcd0fc80,
-	0x002d7ee0,
-	0x0aa24700,
-	0x047e6eb2,
-	0xd8b20000,
-	0xf90587fd,
-	0xfc80f960,
-	0x7ee0fcd0,
-	0xf800002d,
-/* 0x0541: memx_func_wait_vblank */
-	0x0410b600,
-/* 0x0546: memx_func_wr32 */
-	0x169800f8,
-	0x01159800,
-	0xf90810b6,
-	0xfc50f960,
-	0x7ee0fcd0,
-	0xb600002d,
-	0x1bf40242,
-/* 0x0563: memx_func_wait */
-	0x0800f8e8,
-	0x0088cf2c,
-	0x98001e98,
-	0x1c98011d,
-	0x031b9802,
-	0x7e1010b6,
-	0xf8000074,
-/* 0x057d: memx_func_delay */
-	0x001e9800,
-	0x7e0410b6,
-	0xf8000058,
-/* 0x0589: memx_func_train */
-/* 0x058b: memx_exec */
-	0xf900f800,
-	0xb2d0f9e0,
-/* 0x0593: memx_exec_next */
-	0x98b2b2c1,
-	0x10b60013,
-	0xf034e704,
-	0xe033e701,
-	0x0132b601,
-	0x980c30f0,
-	0x55f9de35,
-	0x1ef412a6,
-	0xf10b98e5,
-	0xbbf20c98,
-	0xc44b02cb,
-	0x00bbcf07,
+	0x20460000,
+	0x7e6eb216,
+	0xb2000004,
+	0x0587fdd8,
+	0x80f960f9,
 	0xe0fcd0fc,
-	0x00029f7e,
-/* 0x05ca: memx_info */
-	0xc67000f8,
-	0x0c0bf401,
-/* 0x05d0: memx_info_data */
-	0x4b03cc4c,
-	0x0ef40800,
-/* 0x05d9: memx_info_train */
-	0x0bcc4c09,
-/* 0x05df: memx_info_send */
-	0x7e01004b,
-	0xf800029f,
-/* 0x05e5: memx_recv */
-	0x01d6b000,
-	0xb0a30bf4,
-	0x0bf400d6,
-/* 0x05f3: memx_init */
-	0xf800f8dc,
-/* 0x05f5: perf_recv */
-/* 0x05f7: perf_init */
-	0xf800f800,
-/* 0x05f9: i2c_drive_scl */
-	0x0036b000,
-	0x400d0bf4,
-	0x01f607e0,
-	0xf804bd00,
-/* 0x0609: i2c_drive_scl_lo */
-	0x07e44000,
-	0xbd0001f6,
-/* 0x0613: i2c_drive_sda */
-	0xb000f804,
-	0x0bf40036,
-	0x07e0400d,
-	0xbd0002f6,
-/* 0x0623: i2c_drive_sda_lo */
-	0x4000f804,
-	0x02f607e4,
-	0xf804bd00,
-/* 0x062d: i2c_sense_scl */
-	0x0132f400,
-	0xcf07c443,
-	0x31fd0033,
-	0x060bf404,
-/* 0x063f: i2c_sense_scl_done */
-	0xf80131f4,
-/* 0x0641: i2c_sense_sda */
-	0x0132f400,
-	0xcf07c443,
-	0x32fd0033,
-	0x060bf404,
-/* 0x0653: i2c_sense_sda_done */
-	0xf80131f4,
-/* 0x0655: i2c_raise_scl */
-	0x4440f900,
-	0x01030898,
-	0x0005f97e,
-/* 0x0660: i2c_raise_scl_wait */
-	0x7e03e84e,
-	0x7e000058,
-	0xf400062d,
-	0x42b60901,
-	0xef1bf401,
-/* 0x0674: i2c_raise_scl_done */
-	0x00f840fc,
-/* 0x0678: i2c_start */
-	0x00062d7e,
-	0x7e0d11f4,
-	0xf4000641,
-	0x0ef40611,
-/* 0x0689: i2c_start_rep */
-	0x7e00032e,
-	0x030005f9,
-	0x06137e01,
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x557e50fc,
-	0x64b60006,
-	0x1d11f404,
-/* 0x06b4: i2c_start_send */
-	0x137e0003,
-	0x884e0006,
-	0x00587e13,
-	0x7e000300,
-	0x4e0005f9,
-	0x587e1388,
-/* 0x06ce: i2c_start_out */
+	0x00002d7e,
+	0xb20aa247,
+	0x00047e6e,
+	0xfdd8b200,
+	0x60f90587,
+	0xd0fc80f9,
+	0x2d7ee0fc,
 	0x00f80000,
-/* 0x06d0: i2c_stop */
-	0xf97e0003,
-	0x00030005,
-	0x0006137e,
-	0x7e03e84e,
-	0x03000058,
-	0x05f97e01,
+/* 0x053c: memx_func_wait_vblank */
+	0xf80410b6,
+/* 0x0541: memx_func_wr32 */
+	0x00169800,
+	0xb6011598,
+	0x60f90810,
+	0xd0fc50f9,
+	0x2d7ee0fc,
+	0x42b60000,
+	0xe81bf402,
+/* 0x055e: memx_func_wait */
+	0x2c0800f8,
+	0x980088cf,
+	0x1d98001e,
+	0x021c9801,
+	0xb6031b98,
+	0x747e1010,
+	0x00f80000,
+/* 0x0578: memx_func_delay */
+	0xb6001e98,
+	0x587e0410,
+	0x00f80000,
+/* 0x0584: memx_func_train */
+/* 0x0586: memx_exec */
+	0xe0f900f8,
+	0xc1b2d0f9,
+/* 0x058e: memx_exec_next */
+	0x1398b2b2,
+	0x0410b600,
+	0x01f034e7,
+	0x01e033e7,
+	0xf00132b6,
+	0x35980c30,
+	0xa655f9de,
+	0xe51ef412,
+	0x98f10b98,
+	0xcbbbf20c,
+	0x07c44b02,
+	0xfc00bbcf,
+	0x7ee0fcd0,
+	0xf800029f,
+/* 0x05c5: memx_info */
+	0x01c67000,
+/* 0x05cb: memx_info_data */
+	0x4c0c0bf4,
+	0x004b03cc,
+	0x090ef408,
+/* 0x05d4: memx_info_train */
+	0x4b0bcc4c,
+/* 0x05da: memx_info_send */
+	0x9f7e0100,
+	0x00f80002,
+/* 0x05e0: memx_recv */
+	0xf401d6b0,
+	0xd6b0a30b,
+	0xdc0bf400,
+/* 0x05ee: memx_init */
+	0x00f800f8,
+/* 0x05f0: perf_recv */
+/* 0x05f2: perf_init */
+	0x00f800f8,
+/* 0x05f4: i2c_drive_scl */
+	0xf40036b0,
+	0xe0400d0b,
+	0x0001f607,
+	0x00f804bd,
+/* 0x0604: i2c_drive_scl_lo */
+	0xf607e440,
+	0x04bd0001,
+/* 0x060e: i2c_drive_sda */
+	0x36b000f8,
+	0x0d0bf400,
+	0xf607e040,
+	0x04bd0002,
+/* 0x061e: i2c_drive_sda_lo */
+	0xe44000f8,
+	0x0002f607,
+	0x00f804bd,
+/* 0x0628: i2c_sense_scl */
+	0x430132f4,
+	0x33cf07c4,
+	0x0431fd00,
+	0xf4060bf4,
+/* 0x063a: i2c_sense_scl_done */
+	0x00f80131,
+/* 0x063c: i2c_sense_sda */
+	0x430132f4,
+	0x33cf07c4,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x064e: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x0650: i2c_raise_scl */
+	0x984440f9,
+	0x7e010308,
+/* 0x065b: i2c_raise_scl_wait */
+	0x4e0005f4,
+	0x587e03e8,
+	0x287e0000,
+	0x01f40006,
+	0x0142b609,
+/* 0x066f: i2c_raise_scl_done */
+	0xfcef1bf4,
+/* 0x0673: i2c_start */
+	0x7e00f840,
+	0xf4000628,
+	0x3c7e0d11,
+	0x11f40006,
+	0x2e0ef406,
+/* 0x0684: i2c_start_rep */
+	0xf47e0003,
+	0x01030005,
+	0x00060e7e,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06507e50,
+	0x0464b600,
+/* 0x06af: i2c_start_send */
+	0x031d11f4,
+	0x060e7e00,
 	0x13884e00,
 	0x0000587e,
-	0x137e0103,
-	0x884e0006,
-	0x00587e13,
-/* 0x06ff: i2c_bitw */
-	0x7e00f800,
-	0x4e000613,
-	0x587e03e8,
-	0x76bb0000,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb6000655,
-	0x11f40464,
-	0x13884e17,
-	0x0000587e,
-	0xf97e0003,
+	0xf47e0003,
 	0x884e0005,
 	0x00587e13,
-/* 0x073d: i2c_bitw_out */
-/* 0x073f: i2c_bitr */
+/* 0x06c9: i2c_start_out */
+/* 0x06cb: i2c_stop */
 	0x0300f800,
-	0x06137e01,
-	0x03e84e00,
-	0x0000587e,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x06557e50,
-	0x0464b600,
-	0x7e1a11f4,
-	0x03000641,
-	0x05f97e00,
+	0x05f47e00,
+	0x7e000300,
+	0x4e00060e,
+	0x587e03e8,
+	0x01030000,
+	0x0005f47e,
+	0x7e13884e,
+	0x03000058,
+	0x060e7e01,
 	0x13884e00,
 	0x0000587e,
-	0xf4013cf0,
-/* 0x0782: i2c_bitr_done */
-	0x00f80131,
-/* 0x0784: i2c_get_byte */
-	0x08040005,
-/* 0x0788: i2c_get_byte_next */
-	0xbb0154b6,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x00073f7e,
-	0xf40464b6,
-	0x53fd2a11,
-	0x0142b605,
-	0x03d81bf4,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0xff7e50fc,
-	0x64b60006,
-/* 0x07d1: i2c_get_byte_done */
-/* 0x07d3: i2c_put_byte */
-	0x0400f804,
-/* 0x07d5: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0006ff7e,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x073f7e50,
-	0x0464b600,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x082b: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x082d: i2c_addr */
+/* 0x06fa: i2c_bitw */
+	0x0e7e00f8,
+	0xe84e0006,
+	0x00587e03,
 	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
-	0x787e50fc,
+	0x507e50fc,
 	0x64b60006,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb60007d3,
-/* 0x0872: i2c_addr_done */
-	0x00f80464,
-/* 0x0874: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b705e4,
-	0x00f8d014,
-/* 0x0880: i2c_acquire */
-	0x0008747e,
-	0x0000047e,
-	0x7e03d9f0,
-	0xf800002d,
-/* 0x0891: i2c_release */
-	0x08747e00,
-	0x00047e00,
-	0x03daf000,
-	0x00002d7e,
-/* 0x08a2: i2c_recv */
-	0x32f400f8,
-	0xf8c1c701,
-	0xb00214b6,
-	0x1ff52816,
-	0x13b80134,
-	0x98000cf4,
-	0x13b80032,
-	0x98000ccc,
-	0x31f40031,
-	0xf9d0f902,
-	0xd6d0f9e0,
-	0x10000000,
-	0xbb016792,
+	0x1711f404,
+	0x7e13884e,
+	0x03000058,
+	0x05f47e00,
+	0x13884e00,
+	0x0000587e,
+/* 0x0738: i2c_bitw_out */
+/* 0x073a: i2c_bitr */
+	0x010300f8,
+	0x00060e7e,
+	0x7e03e84e,
+	0xbb000058,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0008807e,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b01bf5,
-	0x76bb0005,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb600082d,
-	0x11f50464,
-	0xc5c700cc,
-	0x0076bbe0,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0xd37e50fc,
-	0x64b60007,
-	0xa911f504,
-	0xbb010500,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x00082d7e,
-	0xf50464b6,
-	0xbb008711,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0007847e,
+	0x0006507e,
 	0xf40464b6,
-	0x5bcb6711,
-	0x0076bbe0,
+	0x3c7e1a11,
+	0x00030006,
+	0x0005f47e,
+	0x7e13884e,
+	0xf0000058,
+	0x31f4013c,
+/* 0x077d: i2c_bitr_done */
+/* 0x077f: i2c_get_byte */
+	0x0500f801,
+/* 0x0783: i2c_get_byte_next */
+	0xb6080400,
+	0x76bb0154,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600073a,
+	0x11f40464,
+	0x0553fd2a,
+	0xf40142b6,
+	0x0103d81b,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06fa7e50,
+	0x0464b600,
+/* 0x07cc: i2c_get_byte_done */
+/* 0x07ce: i2c_put_byte */
+	0x080400f8,
+/* 0x07d0: i2c_put_byte_next */
+	0xff0142b6,
+	0x76bb3854,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb60006fa,
+	0x11f40464,
+	0x0046b034,
+	0xbbd81bf4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x00073a7e,
+	0xf40464b6,
+	0x76bb0f11,
+	0x0136b000,
+	0xf4061bf4,
+/* 0x0826: i2c_put_byte_done */
+	0x00f80132,
+/* 0x0828: i2c_addr */
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06737e50,
+	0x0464b600,
+	0xe72911f4,
+	0xb6012ec3,
+	0x53fd0134,
+	0x0076bb05,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
-	0xd07e50fc,
-	0x64b60006,
-	0xbd5bb204,
-	0x410ef474,
-/* 0x09a4: i2c_recv_not_rd08 */
-	0xf401d6b0,
-	0x00053b1b,
-	0x00082d7e,
-	0xc73211f4,
-	0xd37ee0c5,
-	0x11f40007,
-	0x7e000528,
-	0xf400082d,
-	0xb5c71f11,
-	0x07d37ee0,
-	0x1511f400,
-	0x0006d07e,
-	0xc5c774bd,
-	0x091bf408,
-	0xf40232f4,
-/* 0x09e2: i2c_recv_not_wr08 */
-/* 0x09e2: i2c_recv_done */
-	0xcec7030e,
-	0x08917ef8,
-	0xfce0fc00,
-	0x0912f4d0,
-	0x9f7e7cb2,
-/* 0x09f6: i2c_recv_exit */
-	0x00f80002,
-/* 0x09f8: i2c_init */
-/* 0x09fa: test_recv */
-	0x584100f8,
-	0x0011cf04,
-	0x400110b6,
-	0x01f60458,
-	0xde04bd00,
-	0x134fd900,
-	0x0001de7e,
-/* 0x0a16: test_init */
-	0x004e00f8,
-	0x01de7e08,
-/* 0x0a1f: idle_recv */
+	0xce7e50fc,
+	0x64b60007,
+/* 0x086d: i2c_addr_done */
+/* 0x086f: i2c_acquire_addr */
+	0xc700f804,
+	0xe4b6f8ce,
+	0x14e0b705,
+/* 0x087b: i2c_acquire */
+	0x7e00f8d0,
+	0x7e00086f,
+	0xf0000004,
+	0x2d7e03d9,
+	0x00f80000,
+/* 0x088c: i2c_release */
+	0x00086f7e,
+	0x0000047e,
+	0x7e03daf0,
+	0xf800002d,
+/* 0x089d: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x341ff528,
+	0xf413b801,
+	0x3298000c,
+	0xcc13b800,
+	0x3198000c,
+	0x0231f400,
+	0xe0f9d0f9,
+	0x00d6d0f9,
+	0x92100000,
+	0x76bb0167,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600087b,
+	0xd0fc0464,
+	0xf500d6b0,
+	0x0500b01b,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x287e50fc,
+	0x64b60008,
+	0xcc11f504,
+	0xe0c5c700,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x07ce7e50,
+	0x0464b600,
+	0x00a911f5,
+	0x76bb0105,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb6000828,
+	0x11f50464,
+	0x76bb0087,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600077f,
+	0x11f40464,
+	0xe05bcb67,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06cb7e50,
+	0x0464b600,
+	0x74bd5bb2,
+/* 0x099f: i2c_recv_not_rd08 */
+	0xb0410ef4,
+	0x1bf401d6,
+	0x7e00053b,
+	0xf4000828,
+	0xc5c73211,
+	0x07ce7ee0,
+	0x2811f400,
+	0x287e0005,
+	0x11f40008,
+	0xe0b5c71f,
+	0x0007ce7e,
+	0x7e1511f4,
+	0xbd0006cb,
+	0x08c5c774,
+	0xf4091bf4,
+	0x0ef40232,
+/* 0x09dd: i2c_recv_not_wr08 */
+/* 0x09dd: i2c_recv_done */
+	0xf8cec703,
+	0x00088c7e,
+	0xd0fce0fc,
+	0xb20912f4,
+	0x029f7e7c,
+/* 0x09f1: i2c_recv_exit */
+/* 0x09f3: i2c_init */
 	0xf800f800,
-/* 0x0a21: idle */
-	0x0031f400,
-	0xcf045441,
-	0x10b60011,
-	0x04544001,
-	0xbd0001f6,
-/* 0x0a35: idle_loop */
-	0xf4580104,
-/* 0x0a3a: idle_proc */
-/* 0x0a3a: idle_proc_exec */
-	0x10f90232,
-	0xa87e1eb2,
-	0x10fc0002,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0a4d: idle_proc_next */
-	0x5810b6f0,
-	0x1bf41fa6,
-	0xe002f4e8,
-	0xf40028f4,
-	0x0000c60e,
+/* 0x09f5: test_recv */
+	0x04584100,
+	0xb60011cf,
+	0x58400110,
+	0x0001f604,
+	0x00de04bd,
+	0x7e134fd9,
+	0xf80001de,
+/* 0x0a11: test_init */
+	0x08004e00,
+	0x0001de7e,
+/* 0x0a1a: idle_recv */
+	0x00f800f8,
+/* 0x0a1c: idle */
+	0x410031f4,
+	0x11cf0454,
+	0x0110b600,
+	0xf6045440,
+	0x04bd0001,
+/* 0x0a30: idle_loop */
+	0x32f45801,
+/* 0x0a35: idle_proc */
+/* 0x0a35: idle_proc_exec */
+	0xb210f902,
+	0x02a87e1e,
+	0xf410fc00,
+	0x31f40911,
+	0xf00ef402,
+/* 0x0a48: idle_proc_next */
+	0xa65810b6,
+	0xe81bf41f,
+	0xf4e002f4,
+	0x0ef40028,
+	0x000000c6,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index e833418..e29b785 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x0000083a,
-	0x0000082c,
+	0x00000833,
+	0x00000825,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x0000083e,
-	0x0000083c,
+	0x00000837,
+	0x00000835,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000c6e,
-	0x00000b11,
+	0x00000c67,
+	0x00000b0a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000c97,
-	0x00000c70,
+	0x00000c90,
+	0x00000c69,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000ca3,
-	0x00000ca1,
+	0x00000c9c,
+	0x00000c9a,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -233,22 +233,22 @@
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x000005a0,
+	0x0000059f,
 	0x00000003,
 	0x00000002,
-	0x00000632,
+	0x0000062f,
 	0x00040004,
 	0x00000000,
-	0x0000064e,
+	0x0000064b,
 	0x00010005,
 	0x00000000,
-	0x0000066b,
+	0x00000668,
 	0x00010006,
 	0x00000000,
-	0x000005f0,
+	0x000005ef,
 	0x00000007,
 	0x00000000,
-	0x00000676,
+	0x00000673,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -1304,560 +1304,560 @@
 	0x67f102d7,
 	0x63f1fffc,
 	0x76fdffff,
-	0x0267f104,
-	0x0576fd00,
-	0x70f980f9,
-	0xe0fcd0fc,
-	0xf04021f4,
+	0x0267f004,
+	0xf90576fd,
+	0xfc70f980,
+	0xf4e0fcd0,
+	0x67f04021,
+	0xe007f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x0581: memx_func_enter_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x0bf40464,
+	0x2c67f0f3,
+	0xcf0664b6,
+	0x06800066,
+/* 0x059f: memx_func_leave */
+	0xf000f8f1,
+	0x64b62c67,
+	0x0066cf06,
+	0xf0f20680,
 	0x07f10467,
-	0x04b607e0,
+	0x04b607e4,
 	0x0006d006,
-/* 0x0582: memx_func_enter_wait */
+/* 0x05ba: memx_func_leave_wait */
 	0x67f104bd,
 	0x64b607c0,
 	0x0066cf06,
 	0xf40464f0,
-	0x67f0f30b,
-	0x0664b62c,
-	0x800066cf,
-	0x00f8f106,
-/* 0x05a0: memx_func_leave */
-	0xb62c67f0,
-	0x66cf0664,
-	0xf2068000,
-	0xf10467f0,
-	0xb607e407,
-	0x06d00604,
-/* 0x05bb: memx_func_leave_wait */
-	0xf104bd00,
-	0xb607c067,
-	0x66cf0664,
-	0x0464f000,
-	0xf1f31bf4,
-	0xb9161087,
-	0x21f4028e,
-	0x02d7b904,
-	0xffcc67f1,
-	0xffff63f1,
-	0xf90476fd,
-	0xfc70f980,
-	0xf4e0fcd0,
-	0x00f84021,
-/* 0x05f0: memx_func_wait_vblank */
-	0xb0001698,
-	0x0bf40066,
-	0x0166b013,
-	0xf4060bf4,
-/* 0x0602: memx_func_wait_vblank_head1 */
-	0x77f12e0e,
-	0x0ef40020,
-/* 0x0609: memx_func_wait_vblank_head0 */
-	0x0877f107,
-/* 0x060d: memx_func_wait_vblank_0 */
-	0xc467f100,
-	0x0664b607,
-	0xfd0066cf,
-	0x1bf40467,
-/* 0x061d: memx_func_wait_vblank_1 */
-	0xc467f1f3,
-	0x0664b607,
-	0xfd0066cf,
-	0x0bf40467,
-/* 0x062d: memx_func_wait_vblank_fini */
-	0x0410b6f3,
-/* 0x0632: memx_func_wr32 */
-	0x169800f8,
-	0x01159800,
-	0xf90810b6,
-	0xfc50f960,
-	0xf4e0fcd0,
-	0x42b64021,
-	0xe91bf402,
-/* 0x064e: memx_func_wait */
-	0x87f000f8,
-	0x0684b62c,
-	0x980088cf,
-	0x1d98001e,
-	0x021c9801,
-	0xb6031b98,
-	0x21f41010,
-/* 0x066b: memx_func_delay */
-	0x9800f8a3,
-	0x10b6001e,
-	0x7e21f404,
-/* 0x0676: memx_func_train */
-	0x57f100f8,
-	0x77f10003,
-	0x97f10000,
-	0x93f00000,
-	0x029eb970,
-	0xb90421f4,
-	0xe7f102d8,
-	0x21f42710,
-/* 0x0695: memx_func_train_loop_outer */
-	0x0158e07e,
-	0x0083f101,
-	0xe097f102,
-	0x1193f011,
-	0x80f990f9,
+	0x87f1f31b,
+	0x8eb91610,
+	0x0421f402,
+	0xf102d7b9,
+	0xf1ffcc67,
+	0xfdffff63,
+	0x80f90476,
+	0xd0fc70f9,
+	0x21f4e0fc,
+/* 0x05ef: memx_func_wait_vblank */
+	0x9800f840,
+	0x66b00016,
+	0x120bf400,
+	0xf40166b0,
+	0x0ef4060b,
+/* 0x0601: memx_func_wait_vblank_head1 */
+	0x2077f02c,
+/* 0x0607: memx_func_wait_vblank_head0 */
+	0xf0060ef4,
+/* 0x060a: memx_func_wait_vblank_0 */
+	0x67f10877,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x061a: memx_func_wait_vblank_1 */
+	0x67f1f31b,
+	0x64b607c4,
+	0x0066cf06,
+	0xf40467fd,
+/* 0x062a: memx_func_wait_vblank_fini */
+	0x10b6f30b,
+/* 0x062f: memx_func_wr32 */
+	0x9800f804,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
 	0xe0fcd0fc,
-	0xf94021f4,
-	0x0067f150,
-/* 0x06b5: memx_func_train_loop_inner */
-	0x1187f100,
-	0x9068ff11,
-	0xfd109894,
-	0x97f10589,
-	0x93f00720,
-	0xf990f910,
-	0xfcd0fc80,
-	0x4021f4e0,
-	0x008097f1,
-	0xb91093f0,
-	0x21f4029e,
-	0x02d8b904,
-	0xf92088c5,
+	0xb64021f4,
+	0x1bf40242,
+/* 0x064b: memx_func_wait */
+	0xf000f8e9,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f8a321,
+/* 0x0668: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x0673: memx_func_train */
+	0xf000f87e,
+	0x77f00357,
+	0x0097f100,
+	0x7093f000,
+	0xf4029eb9,
+	0xd8b90421,
+	0x10e7f102,
+	0x7e21f427,
+/* 0x0690: memx_func_train_loop_outer */
+	0x010158e0,
+	0x020083f1,
+	0x11e097f1,
+	0xf91193f0,
+	0xfc80f990,
+	0xf4e0fcd0,
+	0x50f94021,
+/* 0x06af: memx_func_train_loop_inner */
+	0xf10067f0,
+	0xff111187,
+	0x98949068,
+	0x0589fd10,
+	0x072097f1,
+	0xf91093f0,
 	0xfc80f990,
 	0xf4e0fcd0,
 	0x97f14021,
-	0x93f0053c,
-	0x0287f110,
-	0x0083f130,
-	0xf990f980,
+	0x93f00080,
+	0x029eb910,
+	0xb90421f4,
+	0x88c502d8,
+	0xf990f920,
 	0xfcd0fc80,
 	0x4021f4e0,
-	0x0560e7f1,
-	0xf110e3f0,
-	0xf10000d7,
-	0x908000d3,
-	0xb7f100dc,
-	0xb3f08480,
-	0xa321f41e,
-	0x000057f1,
-	0xffff97f1,
-	0x830093f1,
-/* 0x0734: memx_func_train_loop_4x */
-	0x0080a7f1,
-	0xb910a3f0,
-	0x21f402ae,
-	0x02d8b904,
-	0xffdfb7f1,
-	0xffffb3f1,
-	0xf9048bfd,
-	0xfc80f9a0,
+	0x053c97f1,
+	0xf11093f0,
+	0xf1300287,
+	0xf9800083,
+	0xfc80f990,
 	0xf4e0fcd0,
-	0xa7f14021,
-	0xa3f0053c,
-	0x0287f110,
-	0x0083f130,
-	0xf9a0f980,
-	0xfcd0fc80,
-	0x4021f4e0,
-	0x0560e7f1,
-	0xf110e3f0,
-	0xf10000d7,
-	0xb98000d3,
-	0xb7f102dc,
-	0xb3f02710,
-	0xa321f400,
-	0xf402eeb9,
-	0xddb90421,
-	0x949dff02,
+	0xe7f14021,
+	0xe3f00560,
+	0x00d7f110,
+	0x00d3f100,
+	0x00dc9080,
+	0x8480b7f1,
+	0xf41eb3f0,
+	0x57f0a321,
+	0xff97f100,
+	0x0093f1ff,
+/* 0x072d: memx_func_train_loop_4x */
+	0x80a7f183,
+	0x10a3f000,
+	0xf402aeb9,
+	0xd8b90421,
+	0xdfb7f102,
+	0xffb3f1ff,
+	0x048bfdff,
+	0x80f9a0f9,
+	0xe0fcd0fc,
+	0xf14021f4,
+	0xf0053ca7,
+	0x87f110a3,
+	0x83f13002,
+	0xa0f98000,
+	0xd0fc80f9,
+	0x21f4e0fc,
+	0x60e7f140,
+	0x10e3f005,
+	0x0000d7f1,
+	0x8000d3f1,
+	0xf102dcb9,
+	0xf02710b7,
+	0x21f400b3,
+	0x02eeb9a3,
+	0xb90421f4,
+	0x9dff02dd,
+	0x0150b694,
+	0xf4045670,
+	0x7aa0921e,
+	0xa9800bcc,
+	0x0160b600,
+	0x700470b6,
+	0x1ef51066,
+	0x50fcff01,
 	0x700150b6,
-	0x1ef40456,
-	0xcc7aa092,
-	0x00a9800b,
-	0xb60160b6,
-	0x66700470,
-	0x001ef510,
-	0xb650fcff,
-	0x56700150,
-	0xd41ef507,
-/* 0x07c7: memx_exec */
-	0xf900f8fe,
-	0xb9d0f9e0,
-	0xb2b902c1,
-/* 0x07d1: memx_exec_next */
-	0x00139802,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12b855f9,
-	0xe41ef406,
-	0x98f10b98,
-	0xcbbbf20c,
-	0xc4b7f102,
-	0x06b4b607,
-	0xfc00bbcf,
-	0xf5e0fcd0,
+	0x1ef50756,
+	0x00f8fed6,
+/* 0x07c0: memx_exec */
+	0xd0f9e0f9,
+	0xb902c1b9,
+/* 0x07ca: memx_exec_next */
+	0x139802b2,
+	0x0410b600,
+	0x01f034e7,
+	0x01e033e7,
+	0xf00132b6,
+	0x35980c30,
+	0xb855f9de,
+	0x1ef40612,
+	0xf10b98e4,
+	0xbbf20c98,
+	0xb7f102cb,
+	0xb4b607c4,
+	0x00bbcf06,
+	0xe0fcd0fc,
+	0x033621f5,
+/* 0x0806: memx_info */
+	0xc67000f8,
+	0x0e0bf401,
+/* 0x080c: memx_info_data */
+	0x03ccc7f1,
+	0x0800b7f1,
+/* 0x0817: memx_info_train */
+	0xf10b0ef4,
+	0xf10bccc7,
+/* 0x081f: memx_info_send */
+	0xf50100b7,
 	0xf8033621,
-/* 0x080d: memx_info */
-	0x01c67000,
-/* 0x0813: memx_info_data */
-	0xf10e0bf4,
-	0xf103ccc7,
-	0xf40800b7,
-/* 0x081e: memx_info_train */
-	0xc7f10b0e,
-	0xb7f10bcc,
-/* 0x0826: memx_info_send */
-	0x21f50100,
-	0x00f80336,
-/* 0x082c: memx_recv */
-	0xf401d6b0,
-	0xd6b0980b,
-	0xd80bf400,
-/* 0x083a: memx_init */
-	0x00f800f8,
-/* 0x083c: perf_recv */
-/* 0x083e: perf_init */
-	0x00f800f8,
-/* 0x0840: i2c_drive_scl */
-	0xf40036b0,
-	0x07f1110b,
-	0x04b607e0,
-	0x0001d006,
-	0x00f804bd,
-/* 0x0854: i2c_drive_scl_lo */
-	0x07e407f1,
-	0xd00604b6,
-	0x04bd0001,
-/* 0x0862: i2c_drive_sda */
-	0x36b000f8,
-	0x110bf400,
-	0x07e007f1,
-	0xd00604b6,
-	0x04bd0002,
-/* 0x0876: i2c_drive_sda_lo */
-	0x07f100f8,
-	0x04b607e4,
-	0x0002d006,
-	0x00f804bd,
-/* 0x0884: i2c_sense_scl */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0431fd00,
-	0xf4060bf4,
-/* 0x089a: i2c_sense_scl_done */
-	0x00f80131,
-/* 0x089c: i2c_sense_sda */
-	0xf10132f4,
-	0xb607c437,
-	0x33cf0634,
-	0x0432fd00,
-	0xf4060bf4,
-/* 0x08b2: i2c_sense_sda_done */
-	0x00f80131,
-/* 0x08b4: i2c_raise_scl */
-	0x47f140f9,
-	0x37f00898,
-	0x4021f501,
-/* 0x08c1: i2c_raise_scl_wait */
+/* 0x0825: memx_recv */
+	0x01d6b000,
+	0xb0980bf4,
+	0x0bf400d6,
+/* 0x0833: memx_init */
+	0xf800f8d8,
+/* 0x0835: perf_recv */
+/* 0x0837: perf_init */
+	0xf800f800,
+/* 0x0839: i2c_drive_scl */
+	0x0036b000,
+	0xf1110bf4,
+	0xb607e007,
+	0x01d00604,
+	0xf804bd00,
+/* 0x084d: i2c_drive_scl_lo */
+	0xe407f100,
+	0x0604b607,
+	0xbd0001d0,
+/* 0x085b: i2c_drive_sda */
+	0xb000f804,
+	0x0bf40036,
+	0xe007f111,
+	0x0604b607,
+	0xbd0002d0,
+/* 0x086f: i2c_drive_sda_lo */
+	0xf100f804,
+	0xb607e407,
+	0x02d00604,
+	0xf804bd00,
+/* 0x087d: i2c_sense_scl */
+	0x0132f400,
+	0x07c437f1,
+	0xcf0634b6,
+	0x31fd0033,
+	0x060bf404,
+/* 0x0893: i2c_sense_scl_done */
+	0xf80131f4,
+/* 0x0895: i2c_sense_sda */
+	0x0132f400,
+	0x07c437f1,
+	0xcf0634b6,
+	0x32fd0033,
+	0x060bf404,
+/* 0x08ab: i2c_sense_sda_done */
+	0xf80131f4,
+/* 0x08ad: i2c_raise_scl */
+	0xf140f900,
+	0xf0089847,
+	0x21f50137,
+/* 0x08ba: i2c_raise_scl_wait */
+	0xe7f10839,
+	0x21f403e8,
+	0x7d21f57e,
+	0x0901f408,
+	0xf40142b6,
+/* 0x08ce: i2c_raise_scl_done */
+	0x40fcef1b,
+/* 0x08d2: i2c_start */
+	0x21f500f8,
+	0x11f4087d,
+	0x9521f50d,
+	0x0611f408,
+/* 0x08e3: i2c_start_rep */
+	0xf0300ef4,
+	0x21f50037,
+	0x37f00839,
+	0x5b21f501,
+	0x0076bb08,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b608ad,
+	0x1f11f404,
+/* 0x0910: i2c_start_send */
+	0xf50037f0,
+	0xf1085b21,
+	0xf41388e7,
+	0x37f07e21,
+	0x3921f500,
+	0x88e7f108,
+	0x7e21f413,
+/* 0x092c: i2c_start_out */
+/* 0x092e: i2c_stop */
+	0x37f000f8,
+	0x3921f500,
+	0x0037f008,
+	0x085b21f5,
+	0x03e8e7f1,
+	0xf07e21f4,
+	0x21f50137,
+	0xe7f10839,
+	0x21f41388,
+	0x0137f07e,
+	0x085b21f5,
+	0x1388e7f1,
+	0xf87e21f4,
+/* 0x0961: i2c_bitw */
+	0x5b21f500,
 	0xe8e7f108,
 	0x7e21f403,
-	0x088421f5,
-	0xb60901f4,
-	0x1bf40142,
-/* 0x08d5: i2c_raise_scl_done */
-	0xf840fcef,
-/* 0x08d9: i2c_start */
-	0x8421f500,
-	0x0d11f408,
-	0x089c21f5,
-	0xf40611f4,
-/* 0x08ea: i2c_start_rep */
-	0x37f0300e,
-	0x4021f500,
-	0x0137f008,
-	0x086221f5,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0xb421f550,
+	0xad21f550,
 	0x0464b608,
-/* 0x0917: i2c_start_send */
-	0xf01f11f4,
-	0x21f50037,
-	0xe7f10862,
-	0x21f41388,
-	0x0037f07e,
-	0x084021f5,
-	0x1388e7f1,
-/* 0x0933: i2c_start_out */
-	0xf87e21f4,
-/* 0x0935: i2c_stop */
-	0x0037f000,
-	0x084021f5,
-	0xf50037f0,
-	0xf1086221,
-	0xf403e8e7,
-	0x37f07e21,
-	0x4021f501,
-	0x88e7f108,
-	0x7e21f413,
-	0xf50137f0,
-	0xf1086221,
+	0xf11811f4,
 	0xf41388e7,
-	0x00f87e21,
-/* 0x0968: i2c_bitw */
-	0x086221f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x08b421f5,
-	0xf40464b6,
-	0xe7f11811,
-	0x21f41388,
-	0x0037f07e,
-	0x084021f5,
-	0x1388e7f1,
-/* 0x09a7: i2c_bitw_out */
-	0xf87e21f4,
-/* 0x09a9: i2c_bitr */
-	0x0137f000,
-	0x086221f5,
-	0x03e8e7f1,
-	0xbb7e21f4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x08b421f5,
-	0xf40464b6,
-	0x21f51b11,
-	0x37f0089c,
-	0x4021f500,
+	0x37f07e21,
+	0x3921f500,
 	0x88e7f108,
 	0x7e21f413,
-	0xf4013cf0,
-/* 0x09ee: i2c_bitr_done */
-	0x00f80131,
-/* 0x09f0: i2c_get_byte */
-	0xf00057f0,
-/* 0x09f6: i2c_get_byte_next */
-	0x54b60847,
+/* 0x09a0: i2c_bitw_out */
+/* 0x09a2: i2c_bitr */
+	0x37f000f8,
+	0x5b21f501,
+	0xe8e7f108,
+	0x7e21f403,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xad21f550,
+	0x0464b608,
+	0xf51b11f4,
+	0xf0089521,
+	0x21f50037,
+	0xe7f10839,
+	0x21f41388,
+	0x013cf07e,
+/* 0x09e7: i2c_bitr_done */
+	0xf80131f4,
+/* 0x09e9: i2c_get_byte */
+	0x0057f000,
+/* 0x09ef: i2c_get_byte_next */
+	0xb60847f0,
+	0x76bb0154,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb609a221,
+	0x11f40464,
+	0x0553fd2b,
+	0xf40142b6,
+	0x37f0d81b,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b609a9,
-	0x2b11f404,
-	0xb60553fd,
-	0x1bf40142,
-	0x0137f0d8,
+	0x64b60961,
+/* 0x0a39: i2c_get_byte_done */
+/* 0x0a3b: i2c_put_byte */
+	0xf000f804,
+/* 0x0a3e: i2c_put_byte_next */
+	0x42b60847,
+	0x3854ff01,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0x6821f550,
+	0x6121f550,
 	0x0464b609,
-/* 0x0a40: i2c_get_byte_done */
-/* 0x0a42: i2c_put_byte */
-	0x47f000f8,
-/* 0x0a45: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x096821f5,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xa921f550,
-	0x0464b609,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x0a9b: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x0a9d: i2c_addr */
-	0x0076bb00,
+	0xb03411f4,
+	0x1bf40046,
+	0x0076bbd8,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b608d9,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
+	0x64b609a2,
+	0x0f11f404,
+	0xb00076bb,
+	0x1bf40136,
+	0x0132f406,
+/* 0x0a94: i2c_put_byte_done */
+/* 0x0a96: i2c_addr */
+	0x76bb00f8,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb60a4221,
-/* 0x0ae2: i2c_addr_done */
-	0x00f80464,
-/* 0x0ae4: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b702e4,
-	0xee980d1c,
-/* 0x0af3: i2c_acquire */
-	0xf500f800,
-	0xf40ae421,
-	0xd9f00421,
+	0xb608d221,
+	0x11f40464,
+	0x2ec3e729,
+	0x0134b601,
+	0xbb0553fd,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x0a3b21f5,
+/* 0x0adb: i2c_addr_done */
+	0xf80464b6,
+/* 0x0add: i2c_acquire_addr */
+	0xf8cec700,
+	0xb702e4b6,
+	0x980d1ce0,
+	0x00f800ee,
+/* 0x0aec: i2c_acquire */
+	0x0add21f5,
+	0xf00421f4,
+	0x21f403d9,
+/* 0x0afb: i2c_release */
+	0xf500f840,
+	0xf40add21,
+	0xdaf00421,
 	0x4021f403,
-/* 0x0b02: i2c_release */
-	0x21f500f8,
-	0x21f40ae4,
-	0x03daf004,
-	0xf84021f4,
-/* 0x0b11: i2c_recv */
-	0x0132f400,
-	0xb6f8c1c7,
-	0x16b00214,
-	0x3a1ff528,
-	0xf413a001,
-	0x0032980c,
-	0x0ccc13a0,
-	0xf4003198,
-	0xd0f90231,
-	0xd0f9e0f9,
-	0x000067f1,
-	0x100063f1,
-	0xbb016792,
+/* 0x0b0a: i2c_recv */
+	0x32f400f8,
+	0xf8c1c701,
+	0xb00214b6,
+	0x1ff52816,
+	0x13a0013a,
+	0x32980cf4,
+	0xcc13a000,
+	0x0031980c,
+	0xf90231f4,
+	0xf9e0f9d0,
+	0x0067f1d0,
+	0x0063f100,
+	0x01679210,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xec21f550,
+	0x0464b60a,
+	0xd6b0d0fc,
+	0xb31bf500,
+	0x0057f000,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x9621f550,
+	0x0464b60a,
+	0x00d011f5,
+	0xbbe0c5c7,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0af321f5,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b31bf5,
-	0xbb0057f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0a9d21f5,
+	0x0a3b21f5,
 	0xf50464b6,
-	0xc700d011,
-	0x76bbe0c5,
+	0xf000ad11,
+	0x76bb0157,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb60a4221,
+	0xb60a9621,
 	0x11f50464,
-	0x57f000ad,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b60a9d,
-	0x8a11f504,
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b609f0,
-	0x6a11f404,
-	0xbbe05bcb,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x093521f5,
-	0xb90464b6,
-	0x74bd025b,
-/* 0x0c17: i2c_recv_not_rd08 */
-	0xb0430ef4,
-	0x1bf401d6,
-	0x0057f03d,
-	0x0a9d21f5,
-	0xc73311f4,
-	0x21f5e0c5,
-	0x11f40a42,
-	0x0057f029,
-	0x0a9d21f5,
-	0xc71f11f4,
-	0x21f5e0b5,
-	0x11f40a42,
-	0x3521f515,
-	0xc774bd09,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0c57: i2c_recv_not_wr08 */
-/* 0x0c57: i2c_recv_done */
-	0xc7030ef4,
-	0x21f5f8ce,
-	0xe0fc0b02,
-	0x12f4d0fc,
-	0x027cb90a,
-	0x033621f5,
-/* 0x0c6c: i2c_recv_exit */
-/* 0x0c6e: i2c_init */
+	0x76bb008a,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb609e921,
+	0x11f40464,
+	0xe05bcb6a,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x2e21f550,
+	0x0464b609,
+	0xbd025bb9,
+	0x430ef474,
+/* 0x0c10: i2c_recv_not_rd08 */
+	0xf401d6b0,
+	0x57f03d1b,
+	0x9621f500,
+	0x3311f40a,
+	0xf5e0c5c7,
+	0xf40a3b21,
+	0x57f02911,
+	0x9621f500,
+	0x1f11f40a,
+	0xf5e0b5c7,
+	0xf40a3b21,
+	0x21f51511,
+	0x74bd092e,
+	0xf408c5c7,
+	0x32f4091b,
+	0x030ef402,
+/* 0x0c50: i2c_recv_not_wr08 */
+/* 0x0c50: i2c_recv_done */
+	0xf5f8cec7,
+	0xfc0afb21,
+	0xf4d0fce0,
+	0x7cb90a12,
+	0x3621f502,
+/* 0x0c65: i2c_recv_exit */
+/* 0x0c67: i2c_init */
+	0xf800f803,
+/* 0x0c69: test_recv */
+	0xd817f100,
+	0x0614b605,
+	0xb60011cf,
+	0x07f10110,
+	0x04b605d8,
+	0x0001d006,
+	0xe7f104bd,
+	0xe3f1d900,
+	0x21f5134f,
+	0x00f80256,
+/* 0x0c90: test_init */
+	0x0800e7f1,
+	0x025621f5,
+/* 0x0c9a: idle_recv */
 	0x00f800f8,
-/* 0x0c70: test_recv */
-	0x05d817f1,
-	0xcf0614b6,
-	0x10b60011,
-	0xd807f101,
-	0x0604b605,
-	0xbd0001d0,
-	0x00e7f104,
-	0x4fe3f1d9,
-	0x5621f513,
-/* 0x0c97: test_init */
-	0xf100f802,
-	0xf50800e7,
-	0xf8025621,
-/* 0x0ca1: idle_recv */
-/* 0x0ca3: idle */
-	0xf400f800,
-	0x17f10031,
-	0x14b605d4,
-	0x0011cf06,
-	0xf10110b6,
-	0xb605d407,
-	0x01d00604,
-/* 0x0cbf: idle_loop */
-	0xf004bd00,
-	0x32f45817,
-/* 0x0cc5: idle_proc */
-/* 0x0cc5: idle_proc_exec */
-	0xb910f902,
-	0x21f5021e,
-	0x10fc033f,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0cd9: idle_proc_next */
-	0x5810b6ef,
-	0xf4061fb8,
-	0x02f4e61b,
-	0x0028f4dd,
-	0x00bb0ef4,
+/* 0x0c9c: idle */
+	0xf10031f4,
+	0xb605d417,
+	0x11cf0614,
+	0x0110b600,
+	0x05d407f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x0cb8: idle_loop */
+	0xf45817f0,
+/* 0x0cbe: idle_proc */
+/* 0x0cbe: idle_proc_exec */
+	0x10f90232,
+	0xf5021eb9,
+	0xfc033f21,
+	0x0911f410,
+	0xf40231f4,
+/* 0x0cd2: idle_proc_next */
+	0x10b6ef0e,
+	0x061fb858,
+	0xf4e61bf4,
+	0x28f4dd02,
+	0xbb0ef400,
+	0x00000000,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
index ec03f9a..1663bf9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
@@ -82,15 +82,15 @@
 // $r0  - zero
 memx_func_enter:
 #if NVKM_PPWR_CHIPSET == GT215
-	movw $r8 0x1610
+	mov $r8 0x1610
 	nv_rd32($r7, $r8)
 	imm32($r6, 0xfffffffc)
 	and $r7 $r6
-	movw $r6 0x2
+	mov $r6 0x2
 	or $r7 $r6
 	nv_wr32($r8, $r7)
 #else
-	movw $r6 0x001620
+	mov $r6 0x001620
 	imm32($r7, ~0x00000aa2);
 	nv_rd32($r8, $r6)
 	and $r8 $r7
@@ -101,7 +101,7 @@
 	and $r8 $r7
 	nv_wr32($r6, $r8)
 
-	movw $r6 0x0026f0
+	mov $r6 0x0026f0
 	nv_rd32($r8, $r6)
 	and $r8 $r7
 	nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@
 		bra nz #memx_func_leave_wait
 
 #if NVKM_PPWR_CHIPSET == GT215
-	movw $r8 0x1610
+	mov $r8 0x1610
 	nv_rd32($r7, $r8)
 	imm32($r6, 0xffffffcc)
 	and $r7 $r6
 	nv_wr32($r8, $r7)
 #else
-	movw $r6 0x0026f0
+	mov $r6 0x0026f0
 	imm32($r7, 0x00000001)
 	nv_rd32($r8, $r6)
 	or $r8 $r7
 	nv_wr32($r6, $r8)
 
-	movw $r6 0x001620
+	mov $r6 0x001620
 	nv_rd32($r8, $r6)
 	or $r8 $r7
 	nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@
 	bra #memx_func_wait_vblank_fini
 
 	memx_func_wait_vblank_head1:
-	movw $r7 0x20
+	mov $r7 0x20
 	bra #memx_func_wait_vblank_0
 
 	memx_func_wait_vblank_head0:
-	movw $r7 0x8
+	mov $r7 0x8
 
 	memx_func_wait_vblank_0:
 		nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@
 // $r5 - outer loop counter
 // $r6 - inner loop counter
 // $r7 - entry counter (#memx_train_head + $r7)
-	movw $r5 0x3
-	movw $r7 0x0
+	mov $r5 0x3
+	mov $r7 0x0
 
 // Read random memory to wake up... things
 	imm32($r9, 0x700000)
 	nv_rd32($r8,$r9)
-	movw $r14 0x2710
+	mov $r14 0x2710
 	call(nsec)
 
 	memx_func_train_loop_outer:
@@ -289,9 +289,9 @@
 		nv_wr32($r9, $r8)
 		push $r5
 
-		movw $r6 0x0
+		mov $r6 0x0
 		memx_func_train_loop_inner:
-			movw $r8 0x1111
+			mov $r8 0x1111
 			mulu $r9 $r6 $r8
 			shl b32 $r8 $r9 0x10
 			or $r8 $r9
@@ -315,7 +315,7 @@
 
 			// $r5 - inner inner loop counter
 			// $r9 - result
-			movw $r5 0
+			mov $r5 0
 			imm32($r9, 0x8300ffff)
 			memx_func_train_loop_4x:
 				imm32($r10, 0x100080)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 6f65846..5b2a9f9 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1250,7 +1250,7 @@
 		.width = 154,
 		.height = 83,
 	},
-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
 static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b70f942..cab4d60 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -64,7 +64,6 @@
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
 			     obj->size, rk_obj->dma_attrs);
@@ -96,6 +95,12 @@
 	if (ret)
 		return ret;
 
+	/*
+	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+	 * whole buffer from the start.
+	 */
+	vma->vm_pgoff = 0;
+
 	obj = vma->vm_private_data;
 
 	return rockchip_drm_gem_object_mmap(obj, vma);
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index d401156..4460ca4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -129,10 +129,13 @@
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
 	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val = degrees / 120;
+
+	val <<= 28;
 
 	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
 			   GENMASK(29, 28),
-			   degrees / 120);
+			   val);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ec9023b..d53e805 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -80,6 +80,7 @@
 	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
 
 	if (bo->validated_shader) {
+		kfree(bo->validated_shader->uniform_addr_offsets);
 		kfree(bo->validated_shader->texture_samples);
 		kfree(bo->validated_shader);
 		bo->validated_shader = NULL;
@@ -328,6 +329,7 @@
 	}
 
 	if (bo->validated_shader) {
+		kfree(bo->validated_shader->uniform_addr_offsets);
 		kfree(bo->validated_shader->texture_samples);
 		kfree(bo->validated_shader);
 		bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf48..7505655 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -533,7 +533,7 @@
 	 * the scl fields here.
 	 */
 	if (num_planes == 1) {
-		scl0 = vc4_get_scl_field(state, 1);
+		scl0 = vc4_get_scl_field(state, 0);
 		scl1 = scl0;
 	} else {
 		scl0 = vc4_get_scl_field(state, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 917321c..19a5bde8 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -874,6 +874,7 @@
 fail:
 	kfree(validation_state.branch_targets);
 	if (validated_shader) {
+		kfree(validated_shader->uniform_addr_offsets);
 		kfree(validated_shader->texture_samples);
 		kfree(validated_shader);
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b..5463939 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -194,6 +194,9 @@
 	case VIRTGPU_PARAM_3D_FEATURES:
 		value = vgdev->has_virgl_3d == true ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+		value = 1;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -469,7 +472,7 @@
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_get_caps *args = data;
-	int size;
+	unsigned size, host_caps_size;
 	int i;
 	int found_valid = -1;
 	int ret;
@@ -478,6 +481,10 @@
 	if (vgdev->num_capsets == 0)
 		return -ENOSYS;
 
+	/* don't allow userspace to pass 0 */
+	if (args->size == 0)
+		return -EINVAL;
+
 	spin_lock(&vgdev->display_info_lock);
 	for (i = 0; i < vgdev->num_capsets; i++) {
 		if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -493,11 +500,9 @@
 		return -EINVAL;
 	}
 
-	size = vgdev->capsets[found_valid].max_size;
-	if (args->size > size) {
-		spin_unlock(&vgdev->display_info_lock);
-		return -EINVAL;
-	}
+	host_caps_size = vgdev->capsets[found_valid].max_size;
+	/* only copy to user the minimum of the host and guest caps sizes */
+	size = min(args->size, host_caps_size);
 
 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 		if (cache_ent->id == args->cap_set_id &&
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a7..52436b3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 	if (ret == -ENOSPC) {
 		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
 		spin_lock(&vgdev->ctrlq.qlock);
 		goto retry;
 	} else {
@@ -399,7 +399,7 @@
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 	if (ret == -ENOSPC) {
 		spin_unlock(&vgdev->cursorq.qlock);
-		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 		spin_lock(&vgdev->cursorq.qlock);
 		goto retry;
 	} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87086af..33ca24a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2014,6 +2014,7 @@
 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
 					     out_fence, NULL);
 
+	vmw_dmabuf_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033..8545488 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
 
 #else
 
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've run out of registers.
+ * We can't reference that memory location once we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
  */
 #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
 			port_num, magic, bp,		\
 			eax, ebx, ecx, edx, si, di)	\
 ({							\
-	asm volatile ("push %%ebp;"			\
-		"mov %12, %%ebp;"			\
+	asm volatile ("push %12;"			\
+		"push %%ebp;"				\
+		"mov 0x04(%%esp), %%ebp;"		\
 		"rep outsb;"				\
-		"pop %%ebp;" :				\
+		"pop %%ebp;"				\
+		"add $0x04, %%esp;" :			\
 		"=a"(eax),				\
 		"=b"(ebx),				\
 		"=c"(ecx),				\
@@ -167,10 +174,12 @@
 		       port_num, magic, bp,		\
 		       eax, ebx, ecx, edx, si, di)	\
 ({							\
-	asm volatile ("push %%ebp;"			\
-		"mov %12, %%ebp;"			\
+	asm volatile ("push %12;"			\
+		"push %%ebp;"				\
+		"mov 0x04(%%esp), %%ebp;"		\
 		"rep insb;"				\
-		"pop %%ebp" :				\
+		"pop %%ebp;"				\
+		"add $0x04, %%esp;" :			\
 		"=a"(eax),				\
 		"=b"(ebx),				\
 		"=c"(ecx),				\
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 8e21958..fbf298d 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1873,8 +1873,11 @@
 		ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
 				OOB_PERFCNTR_CHECK_MASK,
 				OOB_PERFCNTR_CLEAR_MASK);
-		if (ret)
+		if (ret) {
+			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+			adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
 			kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
+		}
 	}
 
 	return ret;
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index e5c8222..4f98912 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1332,7 +1332,7 @@
 	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
 	if (iommu_regs)
 		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, iommu_regs->range);
+				iommu_regs->base, ilog2(iommu_regs->range));
 }
 
 static void a3xx_start(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 771d035..432e98d 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -524,7 +524,7 @@
 	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
 	if (iommu_regs)
 		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, iommu_regs->range);
+				iommu_regs->base, ilog2(iommu_regs->range));
 }
 
 static struct adreno_snapshot_sizes a4xx_snap_sizes = {
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 876b7c9..68a7c2a 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -386,7 +386,7 @@
 	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
 	if (iommu_regs)
 		adreno_set_protected_registers(adreno_dev, &index,
-				iommu_regs->base, iommu_regs->range);
+				iommu_regs->base, ilog2(iommu_regs->range));
 }
 
 /*
@@ -2281,6 +2281,7 @@
 	switch (ADRENO_GPUREV(adreno_dev)) {
 	case ADRENO_REV_A510:
 		return 0x00000001; /* Ucode workaround for token end syncs */
+	case ADRENO_REV_A504:
 	case ADRENO_REV_A505:
 	case ADRENO_REV_A506:
 	case ADRENO_REV_A530:
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index d1a6005..4bde8c6 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -412,6 +412,15 @@
 	0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
 	/* VPC CTX 1 */
 	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2,
+};
+
+/*
+ * GPMU registers to dump for A5XX on snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+static const unsigned int a5xx_gpmu_registers[] = {
 	/* GPMU */
 	0xA800, 0xA8FF, 0xAC60, 0xAC60,
 };
@@ -664,24 +673,23 @@
 	return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
 }
 
+struct registers {
+	const unsigned int *regs;
+	size_t size;
+};
+
 static size_t a5xx_legacy_snapshot_registers(struct kgsl_device *device,
-		u8 *buf, size_t remain)
+		u8 *buf, size_t remain, const unsigned int *regs, size_t size)
 {
-	struct kgsl_snapshot_registers regs = {
-		.regs = a5xx_registers,
-		.count = ARRAY_SIZE(a5xx_registers) / 2,
+	struct kgsl_snapshot_registers snapshot_regs = {
+		.regs = regs,
+		.count = size / 2,
 	};
 
-	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
+	return kgsl_snapshot_dump_registers(device, buf, remain,
+			&snapshot_regs);
 }
 
-static struct cdregs {
-	const unsigned int *regs;
-	unsigned int size;
-} _a5xx_cd_registers[] = {
-	{ a5xx_registers, ARRAY_SIZE(a5xx_registers) },
-};
-
 #define REG_PAIR_COUNT(_a, _i) \
 	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
 
@@ -691,11 +699,13 @@
 	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
 	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
 	unsigned int *src = (unsigned int *) registers.hostptr;
-	unsigned int i, j, k;
+	struct registers *regs = (struct registers *)priv;
+	unsigned int j, k;
 	unsigned int count = 0;
 
 	if (crash_dump_valid == false)
-		return a5xx_legacy_snapshot_registers(device, buf, remain);
+		return a5xx_legacy_snapshot_registers(device, buf, remain,
+				regs->regs, regs->size);
 
 	if (remain < sizeof(*header)) {
 		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
@@ -704,24 +714,20 @@
 
 	remain -= sizeof(*header);
 
-	for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
-		struct cdregs *regs = &_a5xx_cd_registers[i];
+	for (j = 0; j < regs->size / 2; j++) {
+		unsigned int start = regs->regs[2 * j];
+		unsigned int end = regs->regs[(2 * j) + 1];
 
-		for (j = 0; j < regs->size / 2; j++) {
-			unsigned int start = regs->regs[2 * j];
-			unsigned int end = regs->regs[(2 * j) + 1];
+		if (remain < ((end - start) + 1) * 8) {
+			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+			goto out;
+		}
 
-			if (remain < ((end - start) + 1) * 8) {
-				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-				goto out;
-			}
+		remain -= ((end - start) + 1) * 8;
 
-			remain -= ((end - start) + 1) * 8;
-
-			for (k = start; k <= end; k++, count++) {
-				*data++ = k;
-				*data++ = *src++;
-			}
+		for (k = start; k <= end; k++, count++) {
+			*data++ = k;
+			*data++ = *src++;
 		}
 	}
 
@@ -861,6 +867,7 @@
 	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
 	unsigned int reg, i;
 	struct adreno_ringbuffer *rb;
+	struct registers regs;
 
 	/* Disable Clock gating temporarily for the debug bus to work */
 	a5xx_hwcg_set(adreno_dev, false);
@@ -877,8 +884,20 @@
 	/* Try to run the crash dumper */
 	_a5xx_do_crashdump(device);
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
-		snapshot, a5xx_snapshot_registers, NULL);
+	regs.regs = a5xx_registers;
+	regs.size = ARRAY_SIZE(a5xx_registers);
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+			a5xx_snapshot_registers, &regs);
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+		regs.regs = a5xx_gpmu_registers;
+		regs.size = ARRAY_SIZE(a5xx_gpmu_registers);
+
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+				snapshot, a5xx_snapshot_registers, &regs);
+	}
 
 	/* Dump SP TP HLSQ registers */
 	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
@@ -1035,17 +1054,23 @@
 	 * To save the registers, we need 16 bytes per register pair for the
 	 * script and a dword for each register in the data
 	 */
-	for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
-		struct cdregs *regs = &_a5xx_cd_registers[i];
 
+	/* Each pair needs 16 bytes (2 qwords) */
+	script_size += (ARRAY_SIZE(a5xx_registers) / 2) * 16;
+
+	/* Each register needs a dword in the data */
+	for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++)
+		data_size += REG_PAIR_COUNT(a5xx_registers, j) *
+			sizeof(unsigned int);
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
 		/* Each pair needs 16 bytes (2 qwords) */
-		script_size += (regs->size / 2) * 16;
+		script_size += (ARRAY_SIZE(a5xx_gpmu_registers) / 2) * 16;
 
 		/* Each register needs a dword in the data */
-		for (j = 0; j < regs->size / 2; j++)
-			data_size += REG_PAIR_COUNT(regs->regs, j) *
+		for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++)
+			data_size += REG_PAIR_COUNT(a5xx_gpmu_registers, j) *
 				sizeof(unsigned int);
-
 	}
 
 	/*
@@ -1083,13 +1108,21 @@
 	ptr = (uint64_t *) capturescript.hostptr;
 
 	/* For the registers, program a read command for each pair */
-	for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
-		struct cdregs *regs = &_a5xx_cd_registers[i];
 
-		for (j = 0; j < regs->size / 2; j++) {
-			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
+	for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++) {
+		unsigned int r = REG_PAIR_COUNT(a5xx_registers, j);
+		*ptr++ = registers.gpuaddr + offset;
+		*ptr++ = (((uint64_t) a5xx_registers[2 * j]) << 44)
+			| r;
+		offset += r * sizeof(unsigned int);
+	}
+
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+		for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++) {
+			unsigned int r = REG_PAIR_COUNT(a5xx_gpmu_registers, j);
 			*ptr++ = registers.gpuaddr + offset;
-			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
+			*ptr++ = (((uint64_t) a5xx_gpmu_registers[2 * j]) << 44)
+				| r;
 			offset += r * sizeof(unsigned int);
 		}
 	}
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 37330cb..517b813 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -446,7 +446,7 @@
 
 	if (mmu_prot) {
 		mmu_base = mmu_prot->base;
-		mmu_range = 1 << mmu_prot->range;
+		mmu_range = mmu_prot->range;
 		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
 	}
 
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 6c8e664..132fb02 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -49,8 +49,13 @@
 				OOB_PREEMPTION_SET_MASK,
 				OOB_PREEMPTION_CHECK_MASK,
 				OOB_PREEMPTION_CLEAR_MASK);
-			if (status)
+			if (status) {
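+				/* OOB set failed: flag a GMU fault for recovery */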
+				adreno_set_gpu_fault(adreno_dev,
+					ADRENO_GMU_FAULT);
+				adreno_dispatcher_schedule(
+					KGSL_DEVICE(adreno_dev));
 				return;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index a2d6071..3b55fc6 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2388,7 +2388,6 @@
 	struct kgsl_gpuobj_import *param = data;
 	struct kgsl_mem_entry *entry;
 	int ret, fd = -1;
-	struct kgsl_mmu *mmu = &dev_priv->device->mmu;
 
 	entry = kgsl_mem_entry_create();
 	if (entry == NULL)
@@ -2402,18 +2401,10 @@
 			| KGSL_MEMFLAGS_FORCE_32BIT
 			| KGSL_MEMFLAGS_IOCOHERENT;
 
-	/* Disable IO coherence if it is not supported on the chip */
-	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
-		param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
 	if (kgsl_is_compat_task())
 		param->flags |= KGSL_MEMFLAGS_FORCE_32BIT;
 
-	entry->memdesc.flags = param->flags;
-
-	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
-		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
+	kgsl_memdesc_init(dev_priv->device, &entry->memdesc, param->flags);
 	if (param->type == KGSL_USER_MEM_TYPE_ADDR)
 		ret = _gpuobj_map_useraddr(dev_priv->device, private->pagetable,
 			entry, param);
@@ -2652,6 +2643,7 @@
 	struct kgsl_process_private *private = dev_priv->process_priv;
 	struct kgsl_mmu *mmu = &dev_priv->device->mmu;
 	unsigned int memtype;
+	uint64_t flags;
 
 	/*
 	 * If content protection is not enabled and secure buffer
@@ -2688,30 +2680,17 @@
 	 * Note: CACHEMODE is ignored for this call. Caching should be
 	 * determined by type of allocation being mapped.
 	 */
-	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
-			| KGSL_MEMTYPE_MASK
-			| KGSL_MEMALIGN_MASK
-			| KGSL_MEMFLAGS_USE_CPU_MAP
-			| KGSL_MEMFLAGS_SECURE
-			| KGSL_MEMFLAGS_IOCOHERENT;
-
-	/* Disable IO coherence if it is not supported on the chip */
-	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
-		param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
-	entry->memdesc.flags = (uint64_t) param->flags;
+	flags = param->flags & (KGSL_MEMFLAGS_GPUREADONLY
+				| KGSL_MEMTYPE_MASK
+				| KGSL_MEMALIGN_MASK
+				| KGSL_MEMFLAGS_USE_CPU_MAP
+				| KGSL_MEMFLAGS_SECURE
+				| KGSL_MEMFLAGS_IOCOHERENT);
 
 	if (kgsl_is_compat_task())
-		entry->memdesc.flags |= KGSL_MEMFLAGS_FORCE_32BIT;
+		flags |= KGSL_MEMFLAGS_FORCE_32BIT;
 
-	if (!kgsl_mmu_use_cpu_map(mmu))
-		entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
-	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
-		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
-	if (param->flags & KGSL_MEMFLAGS_SECURE)
-		entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
+	kgsl_memdesc_init(dev_priv->device, &entry->memdesc, flags);
 
 	switch (memtype) {
 	case KGSL_MEM_ENTRY_USER:
@@ -3107,10 +3086,6 @@
 		| KGSL_MEMFLAGS_FORCE_32BIT
 		| KGSL_MEMFLAGS_IOCOHERENT;
 
-	/* Turn off SVM if the system doesn't support it */
-	if (!kgsl_mmu_use_cpu_map(mmu))
-		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
 	/* Return not supported error if secure memory isn't enabled */
 	if (!kgsl_mmu_is_secured(mmu) &&
 			(flags & KGSL_MEMFLAGS_SECURE)) {
@@ -3119,10 +3094,6 @@
 		return ERR_PTR(-EOPNOTSUPP);
 	}
 
-	/* Secure memory disables advanced addressing modes */
-	if (flags & KGSL_MEMFLAGS_SECURE)
-		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
 	/* Cap the alignment bits to the highest number we can handle */
 	align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
 	if (align >= ilog2(KGSL_MAX_ALIGN)) {
@@ -3141,20 +3112,10 @@
 
 	flags = kgsl_filter_cachemode(flags);
 
-	/* Disable IO coherence if it is not supported on the chip */
-	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
-		flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
 	entry = kgsl_mem_entry_create();
 	if (entry == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
-		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
-	if (flags & KGSL_MEMFLAGS_SECURE)
-		entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
-
 	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
 		size, flags);
 	if (ret != 0)
@@ -3338,6 +3299,7 @@
 	struct kgsl_process_private *process = dev_priv->process_priv;
 	struct kgsl_sparse_phys_alloc *param = data;
 	struct kgsl_mem_entry *entry;
+	uint64_t flags;
 	int ret;
 	int id;
 
@@ -3370,11 +3332,12 @@
 	entry->id = id;
 	entry->priv = process;
 
-	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
-	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
+	flags = KGSL_MEMFLAGS_SPARSE_PHYS |
+		((ilog2(param->pagesize) << KGSL_MEMALIGN_SHIFT) &
+			KGSL_MEMALIGN_MASK);
 
 	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
-			param->size, entry->memdesc.flags);
+			param->size, flags);
 	if (ret)
 		goto err_remove_idr;
 
@@ -3463,7 +3426,8 @@
 	if (entry == NULL)
 		return -ENOMEM;
 
-	entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
+	kgsl_memdesc_init(dev_priv->device, &entry->memdesc,
+			KGSL_MEMFLAGS_SPARSE_VIRT);
 	entry->memdesc.size = param->size;
 	entry->memdesc.cur_bindings = 0;
 	kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 325d44a..3539cda 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -262,13 +262,12 @@
 		return;
 	}
 
-	gpu_qdss_desc.flags = 0;
+	kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
 	gpu_qdss_desc.priv = 0;
 	gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
 	gpu_qdss_desc.size = gpu_qdss_entry[1];
 	gpu_qdss_desc.pagetable = NULL;
 	gpu_qdss_desc.ops = NULL;
-	gpu_qdss_desc.dev = device->dev->parent;
 	gpu_qdss_desc.hostptr = NULL;
 
 	result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
@@ -307,13 +306,12 @@
 		return;
 	}
 
-	gpu_qtimer_desc.flags = 0;
+	kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
 	gpu_qtimer_desc.priv = 0;
 	gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
 	gpu_qtimer_desc.size = gpu_qtimer_entry[1];
 	gpu_qtimer_desc.pagetable = NULL;
 	gpu_qtimer_desc.ops = NULL;
-	gpu_qtimer_desc.dev = device->dev->parent;
 	gpu_qtimer_desc.hostptr = NULL;
 
 	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
@@ -1486,6 +1484,7 @@
 {
 	int ret;
 
+	kgsl_memdesc_init(device, &iommu->setstate, 0);
 	ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
 
 	if (!ret) {
@@ -2630,7 +2629,7 @@
 		return -EINVAL;
 	}
 	iommu->protect.base = reg_val[0] / sizeof(u32);
-	iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
+	iommu->protect.range = reg_val[1] / sizeof(u32);
 
 	of_property_for_each_string(node, "clock-names", prop, cname) {
 		struct clk *c = devm_clk_get(&pdev->dev, cname);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 14653ea..df88b9a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -413,7 +413,7 @@
 {
 	int ret;
 
-	memdesc->flags = flags;
+	kgsl_memdesc_init(device, memdesc, flags);
 
 	if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
 		ret = kgsl_sharedmem_alloc_contig(device, memdesc, size);
@@ -769,6 +769,40 @@
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
 
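+/*
+ * kgsl_memdesc_init() - common memdesc setup that used to be open coded by
+ * the callers: zero the descriptor, strip KGSL_MEMFLAGS_USE_CPU_MAP and
+ * KGSL_MEMFLAGS_IOCOHERENT when the MMU cannot honour them, mark guard page
+ * and secure descriptors, and clamp the alignment to at least PAGE_SIZE.
+ */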
+void kgsl_memdesc_init(struct kgsl_device *device,
+			struct kgsl_memdesc *memdesc, uint64_t flags)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+	unsigned int align;
+
+	memset(memdesc, 0, sizeof(*memdesc));
+	/* Turn off SVM if the system doesn't support it */
+	if (!kgsl_mmu_use_cpu_map(mmu))
+		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
+
+	/* Secure memory disables advanced addressing modes */
+	if (flags & KGSL_MEMFLAGS_SECURE)
+		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
+
+	/* Disable IO coherence if it is not supported on the chip */
+	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+		flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);
+
+	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
+		memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
+
+	if (flags & KGSL_MEMFLAGS_SECURE)
+		memdesc->priv |= KGSL_MEMDESC_SECURE;
+
+	memdesc->flags = flags;
+	memdesc->dev = device->dev->parent;
+
+	align = max_t(unsigned int,
+		(memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT,
+		ilog2(PAGE_SIZE));
+	kgsl_memdesc_set_align(memdesc, align);
+}
+
 int
 kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 			uint64_t size)
@@ -969,8 +1003,6 @@
 
 	if (memdesc->pages)
 		kgsl_free(memdesc->pages);
-
-	memset(memdesc, 0, sizeof(*memdesc));
 }
 EXPORT_SYMBOL(kgsl_sharedmem_free);
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 55bb34f..976752d 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,9 @@
 			uint64_t offset, uint64_t size,
 			unsigned int op);
 
+void kgsl_memdesc_init(struct kgsl_device *device,
+			struct kgsl_memdesc *memdesc, uint64_t flags);
+
 void kgsl_process_init_sysfs(struct kgsl_device *device,
 		struct kgsl_process_private *private);
 void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
@@ -282,8 +285,8 @@
 {
 	int ret;
 
-	memdesc->flags = flags;
-	memdesc->priv = priv;
+	kgsl_memdesc_init(device, memdesc, flags);
+	memdesc->priv |= priv;
 
 	if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
 		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 43617fb..317c9c2 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@
 static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
 		uint new_profile_index)
 {
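+	/* Ignore profile indices outside the range the driver tracks */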
+	if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+		return;
 	kovaplus->actual_profile = new_profile_index;
 	kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
 	kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index ce75dd4..2b31b84 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1393,7 +1393,7 @@
 		duty_is_dc = data->REG_PWM_MODE[i] &&
 		  (nct6775_read_value(data, data->REG_PWM_MODE[i])
 		   & data->PWM_MODE_MASK[i]);
-		data->pwm_mode[i] = duty_is_dc;
+		data->pwm_mode[i] = !duty_is_dc;
 
 		fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
 		for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
@@ -2270,7 +2270,7 @@
 	struct nct6775_data *data = nct6775_update_device(dev);
 	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
 
-	return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
+	return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
 }
 
 static ssize_t
@@ -2291,9 +2291,9 @@
 	if (val > 1)
 		return -EINVAL;
 
-	/* Setting DC mode is not supported for all chips/channels */
+	/* Setting DC mode (0) is not supported for all chips/channels */
 	if (data->REG_PWM_MODE[nr] == 0) {
-		if (val)
+		if (!val)
 			return -EINVAL;
 		return count;
 	}
@@ -2302,7 +2302,7 @@
 	data->pwm_mode[nr] = val;
 	reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
 	reg &= ~data->PWM_MODE_MASK[nr];
-	if (val)
+	if (!val)
 		reg |= data->PWM_MODE_MASK[nr];
 	nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
 	mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index d659a02..c3a8f68 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -154,7 +154,7 @@
 	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret = 0;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
@@ -240,7 +240,7 @@
 	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index dd4883a..e951f9b 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -45,7 +45,7 @@
 {
 	int ret;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 3bbcc95..04870ce 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -712,10 +712,6 @@
 
 	CS_UNLOCK(drvdata->base);
 
-	/* check the state of the fuse */
-	if (!coresight_authstatus_enabled(drvdata->base))
-		goto out;
-
 	/* First dummy read */
 	(void)etm_readl(drvdata, ETMPDSR);
 	/* Provide power to ETM: ETMPDCR[3] == 1 */
@@ -725,6 +721,11 @@
 	 * certain registers might be ignored.
 	 */
 	etm_clr_pwrdwn(drvdata);
+
+	/* check the state of the fuse */
+	if (!coresight_authstatus_enabled(drvdata->base))
+		goto out;
+
 	/*
 	 * Set prog bit. It will be set from reset but this is included to
 	 * ensure it is set
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 5db74f0..95d13a9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/coresight.h>
 #include <linux/coresight-pmu.h>
+#include <linux/of.h>
 #include <linux/pm_wakeup.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
@@ -167,12 +168,14 @@
 	writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
 	writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
 
-	/*
-	 * Request to keep the trace unit powered and also
-	 * emulation of powerdown
-	 */
-	writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
-		       drvdata->base + TRCPDCR);
+	if (!drvdata->tupwr_disable) {
+		/*
+		 * Request to keep the trace unit powered and also
+		 * emulation of powerdown
+		 */
+		writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR)
+				| TRCPDCR_PU, drvdata->base + TRCPDCR);
+	}
 
 	/* Enable the trace unit */
 	writel_relaxed(1, drvdata->base + TRCPRGCTLR);
@@ -313,10 +316,12 @@
 
 	CS_UNLOCK(drvdata->base);
 
-	/* power can be removed from the trace unit now */
-	control = readl_relaxed(drvdata->base + TRCPDCR);
-	control &= ~TRCPDCR_PU;
-	writel_relaxed(control, drvdata->base + TRCPDCR);
+	if (!drvdata->tupwr_disable) {
+		/* power can be removed from the trace unit now */
+		control = readl_relaxed(drvdata->base + TRCPDCR);
+		control &= ~TRCPDCR_PU;
+		writel_relaxed(control, drvdata->base + TRCPDCR);
+	}
 
 	control = readl_relaxed(drvdata->base + TRCPRGCTLR);
 
@@ -1041,6 +1046,9 @@
 
 	etmdrvdata[drvdata->cpu] = drvdata;
 
+	drvdata->tupwr_disable = of_property_read_bool(drvdata->dev->of_node,
+				"qcom,tupwr-disable");
+
 	dev_info(dev, "CPU%d: %s initialized\n",
 			drvdata->cpu, (char *)id->data);
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4e51ecdc..5f34bdc 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -345,6 +345,7 @@
  * @nooverflow:	Indicate if overflow prevention is supported.
  * @atbtrig:	If the implementation can support ATB triggers
  * @lpoverride:	If the implementation can support low-power state over.
+ * @tupwr_disable:	True if keeping the trace unit powered is disabled.
  * @config:	structure holding configuration parameters.
  */
 struct etmv4_drvdata {
@@ -391,6 +392,7 @@
 	bool				nooverflow;
 	bool				atbtrig;
 	bool				lpoverride;
+	bool				tupwr_disable;
 	struct etmv4_config		config;
 };
 
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index a5075ba..340c589 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -280,14 +280,13 @@
 
 int stm_set_ost_params(struct stm_drvdata *drvdata, size_t bitmap_size)
 {
-	stmdrvdata = drvdata;
-
 	drvdata->chs.bitmap = devm_kzalloc(drvdata->dev, bitmap_size,
 					   GFP_KERNEL);
 	if (!drvdata->chs.bitmap)
 		return -ENOMEM;
 
 	bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
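+	/* Publish the driver data pointer only after setup can no longer fail */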
+	stmdrvdata = drvdata;
 
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 159512c..caeda7b 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight System Trace Macrocell driver
  *
@@ -839,11 +839,6 @@
 	}
 	bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
 
-	/* Store the driver data pointer for use in exported functions */
-	ret = stm_set_ost_params(drvdata, bitmap_size);
-	if (ret)
-		return ret;
-
 	guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
 	if (!guaranteed)
 		return -ENOMEM;
@@ -872,6 +867,11 @@
 		goto stm_unregister;
 	}
 
+	/* Store the driver data pointer for use in exported functions */
+	ret = stm_set_ost_params(drvdata, bitmap_size);
+	if (ret)
+		goto stm_unregister;
+
 	pm_runtime_put(&adev->dev);
 
 	dev_info(dev, "%s initialized\n", (char *)id->data);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 9d2ab01..e369ea1 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -845,7 +845,6 @@
 	}
 
 	drvdata->enable = true;
-	drvdata->sticky_enable = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 287f901..8aed31a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -139,7 +139,6 @@
 void tmc_enable_hw(struct tmc_drvdata *drvdata)
 {
 	drvdata->enable = true;
-	drvdata->sticky_enable = true;
 	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 	if (drvdata->force_reg_dump)
 		__tmc_reg_dump(drvdata);
@@ -155,7 +154,7 @@
 {
 	int ret = 0;
 
-	if (!drvdata->sticky_enable)
+	if (!drvdata->enable)
 		return -EPERM;
 
 	switch (drvdata->config_type) {
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 85a16b1..21c74ec 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -181,8 +181,10 @@
 	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
 		if (link_ops(csdev)->enable) {
 			ret = link_ops(csdev)->enable(csdev, inport, outport);
-			if (ret)
+			if (ret) {
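+				/* Drop the refcount taken above; the link is not enabled */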
+				atomic_dec(&csdev->refcnt[refport]);
 				return ret;
+			}
 		}
 	}
 
@@ -263,42 +265,66 @@
 	}
 }
 
-void coresight_disable_path(struct list_head *path)
+static void coresight_disable_list_node(struct list_head *path,
+					struct coresight_node *nd)
 {
 	u32 type;
-	struct coresight_node *nd;
 	struct coresight_device *csdev, *parent, *child;
 
+	csdev = nd->csdev;
+	type = csdev->type;
+
+	/*
+	 * ETF devices are tricky... They can be a link or a sink,
+	 * depending on how they are configured.  If an ETF has been
+	 * "activated" it will be configured as a sink, otherwise
+	 * go ahead with the link configuration.
+	 */
+	if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+		type = (csdev == coresight_get_sink(path)) ?
+					CORESIGHT_DEV_TYPE_SINK :
+					CORESIGHT_DEV_TYPE_LINK;
+
+	switch (type) {
+	case CORESIGHT_DEV_TYPE_SINK:
+		coresight_disable_sink(csdev);
+		break;
+	case CORESIGHT_DEV_TYPE_SOURCE:
+		/* sources are disabled from either sysFS or Perf */
+		break;
+	case CORESIGHT_DEV_TYPE_LINK:
+		parent = list_prev_entry(nd, link)->csdev;
+		child = list_next_entry(nd, link)->csdev;
+		coresight_disable_link(csdev, parent, child);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * coresight_disable_previous_devs - disable the devices that were already
+ * enabled when enabling a path failed. Only the devices enabled before the
+ * one that failed need to be disabled.
+ *
+ * @path: the head of the path list
+ * @nd: the node of the device that failed to enable
+ */
+static void coresight_disable_previous_devs(struct list_head *path,
+					struct coresight_node *nd)
+{
+	list_for_each_entry_continue(nd, path, link)
+		coresight_disable_list_node(path, nd);
+}
+
+void coresight_disable_path(struct list_head *path)
+{
+	struct coresight_node *nd;
+
 	list_for_each_entry(nd, path, link) {
-		csdev = nd->csdev;
-		type = csdev->type;
-
-		/*
-		 * ETF devices are tricky... They can be a link or a sink,
-		 * depending on how they are configured.  If an ETF has been
-		 * "activated" it will be configured as a sink, otherwise
-		 * go ahead with the link configuration.
-		 */
-		if (type == CORESIGHT_DEV_TYPE_LINKSINK)
-			type = (csdev == coresight_get_sink(path)) ?
-						CORESIGHT_DEV_TYPE_SINK :
-						CORESIGHT_DEV_TYPE_LINK;
-
-		switch (type) {
-		case CORESIGHT_DEV_TYPE_SINK:
-			coresight_disable_sink(csdev);
-			break;
-		case CORESIGHT_DEV_TYPE_SOURCE:
-			/* sources are disabled from either sysFS or Perf */
-			break;
-		case CORESIGHT_DEV_TYPE_LINK:
-			parent = list_prev_entry(nd, link)->csdev;
-			child = list_next_entry(nd, link)->csdev;
-			coresight_disable_link(csdev, parent, child);
-			break;
-		default:
-			break;
-		}
+		coresight_disable_list_node(path, nd);
 	}
 }
 
@@ -349,7 +375,7 @@
 out:
 	return ret;
 err:
-	coresight_disable_path(path);
+	coresight_disable_previous_devs(path, nd);
 	goto out;
 }
 
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 2dd60ee..b8e2992 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -174,8 +174,9 @@
 {
 	struct stp_master *master;
 	size_t size;
+	unsigned long align = sizeof(unsigned long);
 
-	size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+	size = ALIGN(stm->data->sw_nchannels, align) / align;
 	size += sizeof(struct stp_master);
 	master = kzalloc(size, GFP_ATOMIC);
 	if (!master)
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 340e037..884c1ec 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -507,7 +507,10 @@
 	i2c_dw_disable_int(dev);
 
 	/* Enable the adapter */
-	__i2c_dw_enable_and_wait(dev, true);
+	__i2c_dw_enable(dev, true);
+
+	/* Dummy read to avoid the register getting stuck on Bay Trail */
+	dw_readl(dev, DW_IC_ENABLE_STATUS);
 
 	/* Clear and enable interrupts */
 	dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e6fe21a..b32bf7e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -243,6 +243,7 @@
 	struct i2c_adapter adapter;
 	unsigned long smba;
 	unsigned char original_hstcfg;
+	unsigned char original_slvcmd;
 	struct pci_dev *pci_dev;
 	unsigned int features;
 
@@ -962,13 +963,24 @@
 	if (!priv->host_notify)
 		return -ENOMEM;
 
-	outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
+	if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+		outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+		       SMBSLVCMD(priv));
+
 	/* clear Host Notify bit to allow a new notification */
 	outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
 
 	return 0;
 }
 
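+/* Write back the SMBSLVCMD value saved at probe, turning Host Notify off. */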
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+	if (!(priv->features & FEATURE_HOST_NOTIFY))
+		return;
+
+	outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
+}
+
 static const struct i2c_algorithm smbus_algorithm = {
 	.smbus_xfer	= i801_access,
 	.functionality	= i801_func,
@@ -1589,6 +1601,10 @@
 		outb_p(inb_p(SMBAUXCTL(priv)) &
 		       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
+	/* Remember original Host Notify setting */
+	if (priv->features & FEATURE_HOST_NOTIFY)
+		priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
 	/* Default timeout in interrupt mode: 200 ms */
 	priv->adapter.timeout = HZ / 5;
 
@@ -1666,6 +1682,7 @@
 	pm_runtime_forbid(&dev->dev);
 	pm_runtime_get_noresume(&dev->dev);
 
+	i801_disable_host_notify(priv);
 	i801_del_mux(priv);
 	i2c_del_adapter(&priv->adapter);
 	i801_acpi_remove(priv);
@@ -1679,6 +1696,15 @@
 	 */
 }
 
+static void i801_shutdown(struct pci_dev *dev)
+{
+	struct i801_priv *priv = pci_get_drvdata(dev);
+
+	/* Restore config registers to avoid hard hang on some systems */
+	i801_disable_host_notify(priv);
+	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
 #ifdef CONFIG_PM
 static int i801_suspend(struct device *dev)
 {
@@ -1711,6 +1737,7 @@
 	.id_table	= i801_ids,
 	.probe		= i801_probe,
 	.remove		= i801_remove,
+	.shutdown	= i801_shutdown,
 	.driver		= {
 		.pm	= &i801_pm_ops,
 	},
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b4dec08..5c9dea7 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -848,12 +848,16 @@
 	 */
 	if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
 		drv_data->offload_enabled = true;
-		drv_data->errata_delay = true;
+		/* The delay is only needed in standard mode (100kHz) */
+		if (bus_freq <= 100000)
+			drv_data->errata_delay = true;
 	}
 
 	if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
 		drv_data->offload_enabled = false;
-		drv_data->errata_delay = true;
+		/* The delay is only needed in standard mode (100kHz) */
+		if (bus_freq <= 100000)
+			drv_data->errata_delay = true;
 	}
 
 	if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad..883fe2c 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1593,6 +1593,8 @@
 	struct cdrom_info *info;
 	int rc = -ENXIO;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&ide_cd_mutex);
 	info = ide_cd_get(bdev->bd_disk);
 	if (!info)
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
index bfff285..03e9670 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
@@ -95,12 +95,11 @@
 #define W_FLG	0
 #define R_FLG	1
 int icm20602_bulk_read(struct inv_icm20602_state *st,
-			int reg, char *buf, int size)
+			int reg, u8 *buf, int size)
 {
 	int result = MPU_SUCCESS;
 	char tx_buf[2] = {0x0, 0x0};
-	int tmp_size = size;
-	int tmp_buf = buf;
+	u8 *tmp_buf = buf;
 	struct i2c_msg msg[2];
 
 	if (!st || !buf)
@@ -109,38 +108,31 @@
 	if (st->interface == ICM20602_SPI) {
 		tx_buf[0] = ICM20602_READ_REG(reg);
 		result = spi_write_then_read(st->spi, &tx_buf[0],
-			1, tmp_buf, size);
+		1, tmp_buf, size);
 		if (result) {
 			pr_err("mpu read reg %u failed, rc %d\n",
-				reg, result);
+			reg, result);
 			result = -MPU_READ_FAIL;
 		}
 	} else {
 		result = size;
-		while (tmp_size > 0) {
 #ifdef ICM20602_I2C_SMBUS
-			result += i2c_smbus_read_i2c_block_data(st->client,
-				reg, (tmp_size < 32)?tmp_size:32, tmp_buf);
-			tmp_size -= 32;
-			tmp_buf += tmp_size;
+		result += i2c_smbus_read_i2c_block_data(st->client,
+		reg, size, tmp_buf);
 #else
-			tx_buf[0] = reg;
-			msg[0].addr = st->client->addr;
-			msg[0].flags = W_FLG;
-			msg[0].len = 1;
-			msg[0].buf = tx_buf;
+		tx_buf[0] = reg;
+		msg[0].addr = st->client->addr;
+		msg[0].flags = W_FLG;
+		msg[0].len = 1;
+		msg[0].buf = tx_buf;
 
-			msg[1].addr = st->client->addr;
-			msg[1].flags = I2C_M_RD;
-			msg[1].len = (tmp_size < 32)?tmp_size:32;
-			msg[1].buf = tmp_buf;
-			i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
-			tmp_size -= 32;
-			tmp_buf += tmp_size;
+		msg[1].addr = st->client->addr;
+		msg[1].flags = I2C_M_RD;
+		msg[1].len = size;
+		msg[1].buf = tmp_buf;
+		i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
 #endif
-		}
 	}
-
 	return result;
 }
 
@@ -232,7 +224,7 @@
 {
 	struct struct_icm20602_raw_data raw_data;
 
-	if (type & ACCEL != 0) {
+	if ((type & ACCEL) != 0) {
 		icm20602_read_reg(st,
 			reg_set_20602.ACCEL_XOUT_H.address,
 			&raw_data.ACCEL_XOUT_H);
@@ -264,7 +256,7 @@
 			raw_data.ACCEL_ZOUT_L);
 	}
 
-	if (type & GYRO != 0) {
+	if ((type & GYRO) != 0) {
 		icm20602_read_reg(st,
 			reg_set_20602.GYRO_XOUT_H.address,
 			&raw_data.GYRO_XOUT_H);
@@ -334,7 +326,7 @@
 	return MPU_SUCCESS;
 }
 
-static int icm20602_stop_fifo(struct inv_icm20602_state *st)
+int icm20602_stop_fifo(struct inv_icm20602_state *st)
 {
 	struct icm20602_user_config *config = NULL;
 
@@ -355,6 +347,33 @@
 	return MPU_SUCCESS;
 }
 
+int icm20602_int_status(struct inv_icm20602_state *st,
+	u8 *int_status)
+{
+	return icm20602_read_reg(st,
+		reg_set_20602.INT_STATUS.address, int_status);
+}
+
+int icm20602_int_wm_status(struct inv_icm20602_state *st,
+	u8 *int_status)
+{
+	return icm20602_read_reg(st,
+		reg_set_20602.FIFO_WM_INT_STATUS.address, int_status);
+}
+
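+/* Read the 16-bit FIFO byte count from the FIFO_COUNTH/FIFO_COUNTL pair. */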
+int icm20602_fifo_count(struct inv_icm20602_state *st,
+	u16 *fifo_count)
+{
+	u8 count_h, count_l;
+
+	*fifo_count = 0;
+	icm20602_read_reg(st, reg_set_20602.FIFO_COUNTH.address, &count_h);
+	icm20602_read_reg(st, reg_set_20602.FIFO_COUNTL.address, &count_l);
+	*fifo_count |= (count_h << 8);
+	*fifo_count |= count_l;
+	return MPU_SUCCESS;
+}
+
 static int icm20602_config_waterlevel(struct inv_icm20602_state *st)
 {
 	struct icm20602_user_config *config = NULL;
@@ -413,11 +432,7 @@
 
 static int icm20602_set_self_test(struct inv_icm20602_state *st)
 {
-	uint8_t raw_data[6] = {0, 0, 0, 0, 0, 0};
-	uint8_t selfTest[6];
-	float factory_trim[6];
 	int result = 0;
-	int ii;
 
 	reg_set_20602.SMPLRT_DIV.reg_u.REG.SMPLRT_DIV = 0;
 	result |= icm20602_write_reg_simple(st, reg_set_20602.SMPLRT_DIV);
@@ -436,7 +451,7 @@
 	reg_set_20602.ACCEL_CONFIG.reg_u.REG.ACCEL_FS_SEL = ICM20602_ACC_FSR_2G;
 	result |= icm20602_write_reg_simple(st, reg_set_20602.ACCEL_CONFIG);
 
-	//icm20602_read_ST_code(st);
+	icm20602_read_ST_code(st);
 
 	return 0;
 }
@@ -446,8 +461,7 @@
 {
 	struct struct_icm20602_real_data *real_data =
 		kmalloc(sizeof(struct inv_icm20602_state), GFP_ATOMIC);
-	struct icm20602_user_config *config = st->config;
-	int i, j;
+	int i;
 
 	for (i = 0; i < SELFTEST_COUNT; i++) {
 		icm20602_read_raw(st, real_data, ACCEL);
@@ -494,7 +508,7 @@
 {
 	struct struct_icm20602_real_data *real_data =
 		kmalloc(sizeof(struct inv_icm20602_state), GFP_ATOMIC);
-	int i, j;
+	int i;
 
 	for (i = 0; i < SELFTEST_COUNT; i++) {
 		icm20602_read_raw(st, real_data, GYRO);
@@ -550,7 +564,7 @@
 	st_otp.Y = (st_otp.Y != 0) ? mpu_st_tb[acc_ST_code.Y - 1] : 0;
 	st_otp.Z = (st_otp.Z != 0) ? mpu_st_tb[acc_ST_code.Z - 1] : 0;
 
-	if (st_otp.X & st_otp.Y & st_otp.Z == 0)
+	if ((st_otp.X & st_otp.Y & st_otp.Z) == 0)
 		otp_value_zero = true;
 
 	st_shift_cust.X = acc_st->X - acc->X;
@@ -607,11 +621,11 @@
 	gyro_ST_code.Y = st->config->gyro_self_test.Y;
 	gyro_ST_code.Z = st->config->gyro_self_test.Z;
 
-	st_otp.X = (st_otp.X != 0) ? mpu_st_tb[gyro_ST_code.X - 1] : 0;
-	st_otp.Y = (st_otp.Y != 0) ? mpu_st_tb[gyro_ST_code.Y - 1] : 0;
-	st_otp.Z = (st_otp.Z != 0) ? mpu_st_tb[gyro_ST_code.Z - 1] : 0;
+	st_otp.X = (gyro_ST_code.X != 0) ? mpu_st_tb[gyro_ST_code.X - 1] : 0;
+	st_otp.Y = (gyro_ST_code.Y != 0) ? mpu_st_tb[gyro_ST_code.Y - 1] : 0;
+	st_otp.Z = (gyro_ST_code.Z != 0) ? mpu_st_tb[gyro_ST_code.Z - 1] : 0;
 
-	if (st_otp.X & st_otp.Y & st_otp.Z == 0)
+	if ((st_otp.X & st_otp.Y & st_otp.Z) == 0)
 		otp_value_zero = true;
 
 	st_shift_cust.X = gyro_st->X - gyro->X;
@@ -721,8 +735,6 @@
 {
 	struct icm20602_user_config *config = NULL;
 	int result = MPU_SUCCESS;
-	int sample_rate;
-	uint8_t fchoice_b;
 
 	if (st == NULL)
 		return -MPU_FAIL;
@@ -809,6 +821,7 @@
 	int result = MPU_SUCCESS;
 	struct icm20602_user_config *config = NULL;
 	int package_count;
+	int i;
 
 	config = st->config;
 	if (st == NULL || st->config == NULL) {
@@ -860,7 +873,7 @@
 	/* buffer malloc */
 	package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
 
-	st->buf = kzalloc(sizeof(config->fifo_waterlevel * 2), GFP_ATOMIC);
+	st->buf = kzalloc(config->fifo_waterlevel * 2, GFP_ATOMIC);
 	if (!st->buf)
 		return -ENOMEM;
 
@@ -869,9 +882,26 @@
 	if (!st->data_push)
 		return -ENOMEM;
 
+	for (i = 0; i < package_count; i++) {
+		st->data_push[i].raw_data =
+			kzalloc(ICM20602_PACKAGE_SIZE, GFP_ATOMIC);
+	}
+
 	return result;
 }
 
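+/*
+ * Toggle USER_CTRL.FIFO_RST to flush the hardware FIFO; the cached copy of
+ * the register is cleared again regardless of whether the write succeeds.
+ */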
+int icm20602_reset_fifo(struct inv_icm20602_state *st)
+{
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+	if (icm20602_write_reg_simple(st, reg_set_20602.USER_CTRL)) {
+		reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+		return -MPU_FAIL;
+	}
+	reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+	return MPU_SUCCESS;
+}
+
 void icm20602_rw_test(struct inv_icm20602_state *st)
 {
 	uint8_t val = 0;
@@ -890,7 +920,6 @@
 {
 	int result = MPU_SUCCESS;
 	uint8_t retry = 0, val = 0;
-	uint8_t usr_ctrl = 0;
 
 	pr_debug("icm20602_detect\n");
 	/* reset to make sure previous state are not there */
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
index 0f7fc92..7dda14e 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
@@ -29,6 +29,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include "inv_icm20602_iio.h"
+#include <linux/regulator/consumer.h>
 
 /* Attribute of icm20602 device init show */
 static ssize_t inv_icm20602_init_show(struct device *dev,
@@ -41,12 +42,12 @@
 {
 	struct icm20602_user_config *config = st->config;
 
-	config->user_fps_in_ms = 10;
+	config->user_fps_in_ms = 20;
 	config->gyro_lpf = INV_ICM20602_GYRO_LFP_92HZ;
 	config->gyro_fsr = ICM20602_GYRO_FSR_1000DPS;
 	config->acc_lpf = ICM20602_ACCLFP_99;
 	config->acc_fsr = ICM20602_ACC_FSR_4G;
-	config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_100HZ;
+	config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_200HZ;
 	config->fifo_enabled = true;
 
 }
@@ -89,7 +90,10 @@
 static ssize_t inv_gyro_lpf_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->gyro_lpf);
 }
 
 static ssize_t inv_gyro_lpf_store(struct device *dev,
@@ -105,6 +109,7 @@
 	if (gyro_lpf > INV_ICM20602_GYRO_LFP_NUM)
 		return -EINVAL;
 	config->gyro_lpf = gyro_lpf;
+
 	return count;
 }
 static IIO_DEVICE_ATTR(
@@ -118,7 +123,10 @@
 static ssize_t inv_gyro_fsr_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->gyro_fsr);
 }
 
 static ssize_t inv_gyro_fsr_store(struct device *dev,
@@ -145,30 +153,37 @@
 						inv_gyro_fsr_store,
 						0);
 
-/* Attribute of gyro_self_test */
-static ssize_t inv_gyro_self_test_show(struct device *dev,
+/* Attribute of self_test */
+static ssize_t inv_self_test_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	return 0;
 }
 
-static ssize_t inv_gyro_self_test_store(struct device *dev,
+static ssize_t inv_self_test_store(struct device *dev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	icm20602_self_test(st);
+	return count;
 }
 static IIO_DEVICE_ATTR(
-						inv_icm20602_gyro_self_test,
+						inv_icm20602_self_test,
 						0644,
-						inv_gyro_self_test_show,
-						inv_gyro_self_test_store,
+						inv_self_test_show,
+						inv_self_test_store,
 						0);
 
 /* Attribute of gyro fsr base on enum inv_icm20602_acc_fsr_e */
 static ssize_t inv_gyro_acc_fsr_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->acc_fsr);
 }
 
 static ssize_t inv_gyro_acc_fsr_store(struct device *dev,
@@ -198,7 +213,10 @@
 static ssize_t inv_gyro_acc_lpf_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->acc_lpf);
 }
 
 static ssize_t inv_gyro_acc_lpf_store(struct device *dev,
@@ -224,30 +242,14 @@
 						inv_gyro_acc_lpf_store,
 						0);
 
-/* Attribute of acc_self_test */
-static ssize_t inv_acc_self_test_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	return 0;
-}
-
-static ssize_t inv_acc_self_test_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	return 0;
-}
-static IIO_DEVICE_ATTR(
-						inv_icm20602_acc_self_test,
-						0644,
-						inv_acc_self_test_show,
-						inv_acc_self_test_store,
-						0);
-
 /* Attribute of user_fps_in_ms */
 static ssize_t inv_user_fps_in_ms_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->user_fps_in_ms);
 }
 
 static ssize_t inv_user_fps_in_ms_store(struct device *dev,
@@ -277,13 +279,27 @@
 static ssize_t inv_sampling_frequency_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, 4, "%d\n", st->config->gyro_accel_sample_rate);
 }
 
 static ssize_t inv_sampling_frequency_store(struct device *dev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
-	return 0;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+	struct icm20602_user_config *config = st->config;
+	int gyro_accel_sample_rate;
+
+	if (kstrtoint(buf, 10, &gyro_accel_sample_rate))
+		return -EINVAL;
+	if (gyro_accel_sample_rate < 10)
+		return -EINVAL;
+
+	config->gyro_accel_sample_rate = gyro_accel_sample_rate;
+	return count;
 }
 static IIO_DEV_ATTR_SAMP_FREQ(
 						0644,
@@ -293,11 +309,11 @@
 static struct attribute *inv_icm20602_attributes[] = {
 	&iio_dev_attr_inv_icm20602_init.dev_attr.attr,
 
-	&iio_dev_attr_inv_icm20602_gyro_self_test.dev_attr.attr,
 	&iio_dev_attr_inv_icm20602_gyro_fsr.dev_attr.attr,
 	&iio_dev_attr_inv_icm20602_gyro_lpf.dev_attr.attr,
 
-	&iio_dev_attr_inv_icm20602_acc_self_test.dev_attr.attr,
+	&iio_dev_attr_inv_icm20602_self_test.dev_attr.attr,
+
 	&iio_dev_attr_inv_icm20602_acc_fsr.dev_attr.attr,
 	&iio_dev_attr_inv_icm20602_acc_lpf.dev_attr.attr,
 
@@ -363,6 +379,44 @@
 	.validate_trigger = inv_icm20602_validate_trigger,
 };
 
+static int icm20602_ldo_work(struct inv_icm20602_state *st, bool enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		ret = regulator_set_voltage(st->reg_ldo,
+			ICM20602_LDO_VTG_MIN_UV, ICM20602_LDO_VTG_MAX_UV);
+		if (ret)
+			pr_err("Failed to request LDO voltage.\n");
+
+		ret = regulator_enable(st->reg_ldo);
+		if (ret)
+			pr_err("Failed to enable LDO %d\n", ret);
+	} else {
+		ret = regulator_disable(st->reg_ldo);
+		if (ret)
+			pr_err("Failed to disable LDO %d\n", ret);
+		regulator_set_load(st->reg_ldo, 0);
+	}
+
+	return MPU_SUCCESS;
+}
+
+static int icm20602_init_regulators(struct inv_icm20602_state *st)
+{
+	struct regulator *reg;
+
+	reg = regulator_get(&st->client->dev, "vdd-ldo");
+	if (IS_ERR_OR_NULL(reg)) {
+		pr_err("Unable to get regulator for LDO\n");
+		return -MPU_FAIL;
+	}
+
+	st->reg_ldo = reg;
+
+	return MPU_SUCCESS;
+}
+
 static int of_populate_icm20602_dt(struct inv_icm20602_state *st)
 {
 	int result = MPU_SUCCESS;
@@ -450,6 +504,8 @@
 				result);
 		goto out_remove_trigger;
 	}
+	icm20602_init_regulators(st);
+	icm20602_ldo_work(st, true);
 
 	result = inv_icm20602_probe_trigger(indio_dev);
 	if (result) {
@@ -471,7 +527,6 @@
 	inv_icm20602_remove_trigger(st);
 out_unreg_ring:
 	iio_triggered_buffer_cleanup(indio_dev);
-out_free:
 	iio_device_free(indio_dev);
 	gpio_free(st->gpio);
 
@@ -494,11 +549,24 @@
 
 static int inv_icm20602_suspend(struct device *dev)
 {
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	icm20602_stop_fifo(st);
+	icm20602_ldo_work(st, false);
 	return 0;
 }
 
 static int inv_icm20602_resume(struct device *dev)
 {
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_icm20602_state *st = iio_priv(indio_dev);
+
+	icm20602_ldo_work(st, true);
+	icm20602_detect(st);
+	icm20602_init_device(st);
+	icm20602_start_fifo(st);
+
 	return 0;
 }
 
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
index 943fc1e..b369ae4 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
@@ -41,6 +41,9 @@
 #define INV20602_SMD_IRQ_TRIGGER    1
 #endif
 
+#define ICM20602_LDO_VTG_MIN_UV 3300000
+#define ICM20602_LDO_VTG_MAX_UV 3300000
+
 #define INV_ICM20602_TIME_STAMP_TOR           5
 #define ICM20602_PACKAGE_SIZE 14
 
@@ -180,7 +183,7 @@
 	uint32_t user_fps_in_ms;
 
 	bool fifo_enabled;
-	uint32_t fifo_waterlevel;
+	uint16_t fifo_waterlevel;
 	struct X_Y_Z wake_on_motion;
 };
 
@@ -217,6 +220,7 @@
 	struct struct_icm20602_data *data_push;
 	enum inv_devices chip_type;
 	int gpio;
+	struct regulator *reg_ldo;
 	DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
 };
 
@@ -257,19 +261,21 @@
 
 struct struct_icm20602_data {
 	s64 timestamps;
-	struct struct_icm20602_raw_data raw_data;
-	struct struct_icm20602_real_data real_data;
+	u8 *raw_data;
 };
 
 extern struct iio_trigger *inv_trig;
 irqreturn_t inv_icm20602_irq_handler(int irq, void *p);
 irqreturn_t inv_icm20602_read_fifo_fn(int irq, void *p);
-int inv_icm20602_reset_fifo(struct iio_dev *indio_dev);
 
 int inv_icm20602_probe_trigger(struct iio_dev *indio_dev);
 void inv_icm20602_remove_trigger(struct inv_icm20602_state *st);
 int inv_icm20602_validate_trigger(struct iio_dev *indio_dev,
 				struct iio_trigger *trig);
+int icm20602_int_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_int_wm_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_reset_fifo(struct inv_icm20602_state *st);
+int icm20602_fifo_count(struct inv_icm20602_state *st, u16 *fifo_count);
 
 int icm20602_read_raw(struct inv_icm20602_state *st,
 		struct struct_icm20602_real_data *real_data, uint32_t type);
@@ -279,4 +285,6 @@
 int icm20602_read_fifo(struct inv_icm20602_state *st,
 	void *buf, const int size);
 int icm20602_start_fifo(struct inv_icm20602_state *st);
+int icm20602_stop_fifo(struct inv_icm20602_state *st);
+bool icm20602_self_test(struct inv_icm20602_state *st);
 #endif
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
index b0f93be..081bec9 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
@@ -40,15 +40,6 @@
 	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
 }
 
-int inv_icm20602_reset_fifo(struct iio_dev *indio_dev)
-{
-	int result;
-	//u8 d;
-	//struct inv_icm20602_state  *st = iio_priv(indio_dev);
-
-	return result;
-}
-
 /*
  * inv_icm20602_irq_handler() - Cache a timestamp at each data ready interrupt.
  */
@@ -67,52 +58,50 @@
 	return IRQ_WAKE_THREAD;
 }
 
+#define BIT_FIFO_OFLOW_INT			0x10
+#define BIT_FIFO_WM_INT				0x40
 static int inv_icm20602_read_data(struct iio_dev *indio_dev)
 {
 	int result = MPU_SUCCESS;
 	struct inv_icm20602_state *st = iio_priv(indio_dev);
 	struct icm20602_user_config *config = st->config;
 	int package_count;
-	//char *buf = st->buf;
-	//struct struct_icm20602_data *data_push = st->data_push;
+	char *buf = st->buf;
 	s64 timestamp;
+	u8 int_status;
+	u16 fifo_count;
 	int i;
 
 	if (!st)
 		return -MPU_FAIL;
 	package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
 	mutex_lock(&indio_dev->mlock);
-	if (config->fifo_enabled) {
-		result = icm20602_read_fifo(st,
-				st->buf, config->fifo_waterlevel);
-		if (result != config->fifo_waterlevel) {
-			pr_err("icm20602 read fifo failed, result = %d\n",
-				result);
-			goto flush_fifo;
-		}
-
-		for (i = 0; i < package_count; i++) {
-			memcpy((char *)(&st->data_push[i].raw_data),
-				st->buf, ICM20602_PACKAGE_SIZE);
-			result = kfifo_out(&st->timestamps,
-				&timestamp, 1);
-			/* when there is no timestamp, put it as 0 */
-			if (result == 0)
-				timestamp = 0;
-			st->data_push[i].timestamps = timestamp;
-			iio_push_to_buffers(indio_dev, st->data_push+i);
-			st->buf += ICM20602_PACKAGE_SIZE;
-		}
+	icm20602_int_status(st, &int_status);
+	if (int_status & BIT_FIFO_OFLOW_INT) {
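+		/* FIFO overflow: drop cached timestamps and reset the HW FIFO */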
+		icm20602_fifo_count(st, &fifo_count);
+		pr_debug("fifo_count = %d\n", fifo_count);
+		inv_clear_kfifo(st);
+		icm20602_reset_fifo(st);
+		goto end_session;
 	}
-//end_session:
-	mutex_unlock(&indio_dev->mlock);
-	iio_trigger_notify_done(indio_dev->trig);
-	return MPU_SUCCESS;
-
-flush_fifo:
-	/* Flush HW and SW FIFOs. */
-	inv_clear_kfifo(st);
-	inv_icm20602_reset_fifo(indio_dev);
+	if (config->fifo_enabled) {
+		result = kfifo_out(&st->timestamps,
+				&timestamp, 1);
+		/* when there is no timestamp, put it as 0 */
+		if (result == 0)
+			timestamp = 0;
+		for (i = 0; i < package_count; i++) {
+			result = icm20602_read_fifo(st,
+					buf, ICM20602_PACKAGE_SIZE);
+			memcpy(st->data_push[i].raw_data,
+					buf, ICM20602_PACKAGE_SIZE);
+			iio_push_to_buffers_with_timestamp(indio_dev,
+					st->data_push[i].raw_data, timestamp);
+			buf += ICM20602_PACKAGE_SIZE;
+		}
+		memset(st->buf, 0, config->fifo_waterlevel);
+	}
+end_session:
 	mutex_unlock(&indio_dev->mlock);
 	iio_trigger_notify_done(indio_dev->trig);
 	return MPU_SUCCESS;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef60..15f4bdf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -999,8 +999,7 @@
 		return -ENOMEM;
 
 	ib_comp_wq = alloc_workqueue("ib-comp-wq",
-			WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-			WQ_UNBOUND_MAX_ACTIVE);
+			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 	if (!ib_comp_wq) {
 		ret = -ENOMEM;
 		goto err;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 322cb67..28d1845 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -724,21 +724,19 @@
 {
 	int ret;
 	u16 gid_index;
-	u8 p;
 
-	if (rdma_protocol_roce(device, port_num)) {
-		ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
-						 gid_type, port_num,
-						 ndev,
-						 &gid_index);
-	} else if (rdma_protocol_ib(device, port_num)) {
-		ret = ib_find_cached_gid(device, &rec->port_gid,
-					 IB_GID_TYPE_IB, NULL, &p,
+	/* GID table is not based on the netdevice for IB link layer,
+	 * so ignore ndev during search.
+	 */
+	if (rdma_protocol_ib(device, port_num))
+		ndev = NULL;
+	else if (!rdma_protocol_roce(device, port_num))
+		return -EINVAL;
+
+	ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
+					 gid_type, port_num,
+					 ndev,
 					 &gid_index);
-	} else {
-		ret = -EINVAL;
-	}
-
 	if (ret)
 		return ret;
 
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 81b742c..4baf3b8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1137,10 +1137,9 @@
 
 		resolved_dev = dev_get_by_index(dev_addr.net,
 						dev_addr.bound_dev_if);
-		if (resolved_dev->flags & IFF_LOOPBACK) {
-			dev_put(resolved_dev);
-			resolved_dev = idev;
-			dev_hold(resolved_dev);
+		if (!resolved_dev) {
+			dev_put(idev);
+			return -ENODEV;
 		}
 		ndev = ib_get_ndev_from_path(rec);
 		rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index cb79d17..a036d70 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -676,7 +676,7 @@
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	if (!rdma_addr_size_in6(&cmd.src_addr) ||
+	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
 	    !rdma_addr_size_in6(&cmd.dst_addr))
 		return -EINVAL;
 
@@ -1296,7 +1296,7 @@
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
 		return -EINVAL;
 
 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b85a1a9..9e0f2cc 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -856,6 +856,11 @@
 
 	rdev->status_page->db_off = 0;
 
+	init_completion(&rdev->rqt_compl);
+	init_completion(&rdev->pbl_compl);
+	kref_init(&rdev->rqt_kref);
+	kref_init(&rdev->pbl_kref);
+
 	return 0;
 err_free_status_page:
 	free_page((unsigned long)rdev->status_page);
@@ -872,12 +877,14 @@
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
-	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
+	wait_for_completion(&rdev->pbl_compl);
+	wait_for_completion(&rdev->rqt_compl);
 	c4iw_destroy_resource(&rdev->resource);
+	destroy_workqueue(rdev->free_workq);
 }
 
 static void c4iw_dealloc(struct uld_ctx *ctx)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7d54066..896dff7 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -186,6 +186,10 @@
 	struct wr_log_entry *wr_log;
 	int wr_log_size;
 	struct workqueue_struct *free_workq;
+	struct completion rqt_compl;
+	struct completion pbl_compl;
+	struct kref rqt_kref;
+	struct kref pbl_kref;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 67df71a..803c677 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
 		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
 			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+		kref_get(&rdev->pbl_kref);
 	} else
 		rdev->stats.pbl.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
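+/* Last PBL pool reference dropped: free the pool and unblock rdev close. */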
+static void destroy_pblpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+	gen_pool_destroy(rdev->pbl_pool);
+	complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
@@ -273,6 +283,7 @@
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -312,7 +323,7 @@
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->pbl_pool);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -333,12 +344,22 @@
 		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
 		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
 			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+		kref_get(&rdev->rqt_kref);
 	} else
 		rdev->stats.rqt.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
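+/* Last RQT pool reference dropped: free the pool and unblock rdev close. */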
+static void destroy_rqtpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+	gen_pool_destroy(rdev->rqt_pool);
+	complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
@@ -346,6 +367,7 @@
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -383,7 +405,7 @@
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->rqt_pool);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 7853b0c..148b313 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5860,6 +5860,7 @@
 	u64 status;
 	u32 sw_index;
 	int i = 0;
+	unsigned long irq_flags;
 
 	sw_index = dd->hw_to_sw[hw_context];
 	if (sw_index >= dd->num_send_contexts) {
@@ -5869,10 +5870,12 @@
 		return;
 	}
 	sci = &dd->send_contexts[sw_index];
+	spin_lock_irqsave(&dd->sc_lock, irq_flags);
 	sc = sci->sc;
 	if (!sc) {
 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
 			   sw_index, hw_context);
+		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 		return;
 	}
 
@@ -5894,6 +5897,7 @@
 	 */
 	if (sc->type != SC_USER)
 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 
 	/*
 	 * Update the counters for the corresponding status bits.
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 632ba21..0022660 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -67,13 +67,13 @@
 	loff_t *ppos)
 {
 	struct dentry *d = file->f_path.dentry;
-	int srcu_idx;
 	ssize_t r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
-	if (likely(!r))
-		r = seq_read(file, buf, size, ppos);
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(d);
+	if (unlikely(r))
+		return r;
+	r = seq_read(file, buf, size, ppos);
+	debugfs_file_put(d);
 	return r;
 }
 
@@ -83,13 +83,13 @@
 	int whence)
 {
 	struct dentry *d = file->f_path.dentry;
-	int srcu_idx;
 	loff_t r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
-	if (likely(!r))
-		r = seq_lseek(file, offset, whence);
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(d);
+	if (unlikely(r))
+		return r;
+	r = seq_lseek(file, offset, whence);
+	debugfs_file_put(d);
 	return r;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 84a97f3..ae1f90d 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1049,6 +1049,8 @@
 		return ERR_PTR(-ENOMEM);
 	dd->num_pports = nports;
 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
+	dd->pcidev = pdev;
+	pci_set_drvdata(pdev, dd);
 
 	INIT_LIST_HEAD(&dd->list);
 	idr_preload(GFP_KERNEL);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 335613a1..7176260 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -162,9 +162,6 @@
 	unsigned long len;
 	resource_size_t addr;
 
-	dd->pcidev = pdev;
-	pci_set_drvdata(pdev, dd);
-
 	addr = pci_resource_start(pdev, 0);
 	len = pci_resource_len(pdev, 0);
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4b892ca..095912fb 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1515,6 +1515,7 @@
 		err_code = -EOVERFLOW;
 		goto err;
 	}
+	stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
 	iwmr->stag = stag;
 	iwmr->ibmr.rkey = stag;
 	iwmr->ibmr.lkey = stag;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 19bc1c2..8d59a59 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -216,8 +216,6 @@
 			gid_tbl[i].version = 2;
 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
 				gid_tbl[i].type = 1;
-			else
-				memset(&gid_tbl[i].gid, 0, 12);
 		}
 	}
 
@@ -363,8 +361,13 @@
 		if (!gids) {
 			ret = -ENOMEM;
 		} else {
-			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+				memcpy(&gids[i].gid,
+				       &port_gid_table->gids[i].gid,
+				       sizeof(union ib_gid));
+				gids[i].gid_type =
+				    port_gid_table->gids[i].gid_type;
+			}
 		}
 	}
 	spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 403df35..abb47e7 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -253,7 +253,11 @@
 	} else {
 		if (ucmd) {
 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+				return -EINVAL;
 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+				return -EINVAL;
 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
 			qp->rq.max_post = qp->rq.wqe_cnt;
 		} else {
@@ -2164,18 +2168,18 @@
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-	if (rate == IB_RATE_PORT_CURRENT) {
+	if (rate == IB_RATE_PORT_CURRENT)
 		return 0;
-	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
-		return -EINVAL;
-	} else {
-		while (rate != IB_RATE_2_5_GBPS &&
-		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
-			--rate;
-	}
 
-	return rate + MLX5_STAT_RATE_OFFSET;
+	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
+		return -EINVAL;
+
+	while (rate != IB_RATE_PORT_CURRENT &&
+	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+		--rate;
+
+	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
 }
 
 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
@@ -2805,8 +2809,10 @@
 		mlx5_ib_qp_disable_pagefaults(qp);
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
@@ -2848,7 +2854,8 @@
 	 * If we moved a kernel QP to RESET, clean up all old CQ
 	 * entries and reinitialize the QP.
 	 */
-	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+	if (new_state == IB_QPS_RESET &&
+	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
 		if (send_cq != recv_cq)
@@ -4605,13 +4612,10 @@
 	int err;
 
 	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
-	if (err) {
+	if (err)
 		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
-		return err;
-	}
 
 	kfree(xrcd);
-
 	return 0;
 }
 
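The two checks added in the first mlx5 hunk above guard a user-controlled shift: the first bounds rq_wqe_shift before it is fed into "1 << shift", and the second makes sure the resulting stride is large enough that max_gs cannot go negative when a WQE signature is in use. A hedged, standalone sketch of the same idea (names and the 32-bit limit are illustrative, not the mlx5 values):

#include <linux/errno.h>
#include <linux/types.h>

/* Validate an untrusted WQE shift before using it as a shift amount. */
static int validate_user_wqe_shift(u8 wqe_shift, size_t seg_size, int wq_sig)
{
	if (wqe_shift >= 32)		/* "1 << shift" would be undefined */
		return -EINVAL;

	/* the stride must leave room for at least the signature segment */
	if ((1U << wqe_shift) / seg_size < (unsigned int)wq_sig)
		return -EINVAL;

	return 0;
}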
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 58e92bc..f937873 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -762,7 +762,8 @@
 
 	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
 	if (!dev->num_cnq) {
-		DP_ERR(dev, "not enough CNQ resources.\n");
+		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
+		rc = -ENOMEM;
 		goto init_err;
 	}
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 35d5b89..cd0408c 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1888,18 +1888,23 @@
 		SET_FIELD(qp_params.modify_flags,
 			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
 
-		qp_params.ack_timeout = attr->timeout;
-		if (attr->timeout) {
-			u32 temp;
-
-			temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
-			/* FW requires [msec] */
-			qp_params.ack_timeout = temp;
-		} else {
-			/* Infinite */
+		/* The received timeout value is an exponent used like this:
+		 *    "12.7.34 LOCAL ACK TIMEOUT
+		 *    Value representing the transport (ACK) timeout for use by
+		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
+		 * The FW expects timeout in msec so we need to divide the usec
+		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
+		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
+		 * The value of zero means infinite so we use a 'max_t' to make
+		 * sure that sub 1 msec values will be configured as 1 msec.
+		 */
+		if (attr->timeout)
+			qp_params.ack_timeout =
+					1 << max_t(int, attr->timeout - 8, 0);
+		else
 			qp_params.ack_timeout = 0;
-		}
 	}
+
 	if (attr_mask & IB_QP_RETRY_CNT) {
 		SET_FIELD(qp_params.modify_flags,
 			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
@@ -2807,6 +2812,11 @@
 
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
 		swqe->wqe_size = 2;
@@ -2848,6 +2858,11 @@
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
 
@@ -3467,7 +3482,7 @@
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
-	union rdma_cqe *cqe = cq->latest_cqe;
+	union rdma_cqe *cqe;
 	u32 old_cons, new_cons;
 	unsigned long flags;
 	int update = 0;
@@ -3477,6 +3492,7 @@
 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
+	cqe = cq->latest_cqe;
 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
 	while (num_entries && is_valid_cqe(cq, cqe)) {
 		struct qedr_qp *qp;
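The ack-timeout hunk above converts the IB exponent encoding (4.096 usec * 2^timeout) into the milliseconds the firmware expects by approximating 4.096 ~ 2^2 and 1000 ~ 2^10, i.e. 2^(timeout - 8) msec, clamped so sub-millisecond values still program 1 msec. A standalone sketch of that conversion (helper name is illustrative):

#include <linux/kernel.h>	/* max_t() */
#include <linux/types.h>

static u32 ib_ack_timeout_to_msec(u8 timeout)
{
	if (!timeout)
		return 0;	/* 0 means an infinite (disabled) timeout */

	/* 4.096us * 2^timeout ~ 2^(timeout + 2) us ~ 2^(timeout - 8) ms */
	return 1U << max_t(int, (int)timeout - 8, 0);
}

For example, timeout = 14 is roughly 67 msec exactly (4.096 usec * 2^14) and programs 64 msec here, while timeout = 5 (~0.13 msec) is clamped up to 1 msec.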
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0df7d45..17c5bc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2119,6 +2119,9 @@
 		goto event_failed;
 	}
 
+	/* call event handler to ensure pkey in sync */
+	queue_work(ipoib_workqueue, &priv->flush_heavy);
+
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf26..5f04b2d 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@
 			      const struct input_device_id *id)
 {
 	struct input_leds *leds;
+	struct input_led *led;
 	unsigned int num_leds;
 	unsigned int led_code;
 	int led_no;
@@ -119,14 +120,13 @@
 
 	led_no = 0;
 	for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
-		struct input_led *led = &leds->leds[led_no];
-
-		led->handle = &leds->handle;
-		led->code = led_code;
-
 		if (!input_led_info[led_code].name)
 			continue;
 
+		led = &leds->leds[led_no];
+		led->handle = &leds->handle;
+		led->code = led_code;
+
 		led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
 					   dev_name(&dev->dev),
 					   input_led_info[led_code].name);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e..251d64c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@
 	if (!haptics)
 		return -ENOMEM;
 
-	haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+	haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
 	haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
 
 	if (pdata) {
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c
index 08b5ce2..3905bd2 100644
--- a/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api_calibration.c
@@ -506,7 +506,7 @@
 					*pIsAperture = 1;
 					if ((byteIndex < 2) && (bitIndex < 4))
 						*pIsAperture = 0;
-						spadTypeIdentified = 1;
+					spadTypeIdentified = 1;
 				}
 			}
 			tempByte >>= 1;
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c b/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c
index ceb3585..6824c02 100644
--- a/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_api_core.c
@@ -182,7 +182,7 @@
 			Status = VL_RdByte(Dev, 0x83, &strobe);
 			if ((strobe != 0x00) || Status != VL_ERROR_NONE)
 				break;
-				LoopNb = LoopNb + 1;
+			LoopNb = LoopNb + 1;
 		} while (LoopNb < VL_DEFAULT_MAX_LOOP);
 
 		if (LoopNb >= VL_DEFAULT_MAX_LOOP)
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x.h b/drivers/input/misc/vl53l0x/stmvl53l0x.h
index 1f15278..1be251f 100644
--- a/drivers/input/misc/vl53l0x/stmvl53l0x.h
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x.h
@@ -34,6 +34,7 @@
 /* #define INT_POLLING_DELAY	20 */
 
 /* if don't want to have output from dbg, comment out #DEBUG macro */
+#define DEBUG
 #ifdef DEBUG
 #define dbg(fmt, ...)	\
 	printk(fmt, ##__VA_ARGS__)
@@ -51,6 +52,8 @@
 	NORMAL_MODE = 0,
 	OFFSETCALIB_MODE = 1,
 	XTALKCALIB_MODE = 2,
+	SPADCALIB_MODE = 3,
+	REFCALIB_MODE = 4,
 };
 
 enum parameter_name_e {
@@ -167,6 +170,8 @@
 	/* Debug */
 	unsigned int enableDebug;
 	uint8_t interrupt_received;
+	int32_t default_offset_calibration;
+	unsigned int default_xtalk_Compensation;
 };
 
 /*
diff --git a/drivers/input/misc/vl53l0x/stmvl53l0x_module.c b/drivers/input/misc/vl53l0x/stmvl53l0x_module.c
index ece245f..99cbc11 100644
--- a/drivers/input/misc/vl53l0x/stmvl53l0x_module.c
+++ b/drivers/input/misc/vl53l0x/stmvl53l0x_module.c
@@ -777,18 +777,18 @@
 				size_t count)
 {
 	struct vl_data *data = dev_get_drvdata(dev);
-
+	int ret;
 	unsigned int val;
 
-	kstrtoint(buf, 10, &val);
+	ret = kstrtoint(buf, 10, &val);
 	if ((val != 0) && (val != 1)) {
-		err("store unvalid value=%ld\n", val);
+		err("store unvalid value=%d\n", val);
 		return count;
 	}
 	mutex_lock(&data->work_mutex);
 	dbg("Enter, enable_ps_sensor flag:%d\n",
 		data->enable_ps_sensor);
-	dbg("enable ps senosr ( %ld)\n", val);
+	dbg("enable ps senosr ( %d)\n", val);
 
 	if (val == 1) {
 		/* turn on tof sensor */
@@ -831,10 +831,11 @@
 {
 	struct vl_data *data = dev_get_drvdata(dev);
 	int on;
+	int ret;
 
-	kstrtoint(buf, 10, &on);
+	ret = kstrtoint(buf, 10, &on);
 	if ((on != 0) &&  (on != 1)) {
-		err("set debug=%ld\n", on);
+		err("set debug=%d\n", on);
 		return count;
 	}
 	data->enableDebug = on;
@@ -862,10 +863,11 @@
 {
 	struct vl_data *data = dev_get_drvdata(dev);
 	int delay_ms;
+	int ret;
 
-	kstrtoint(buf, 10, &delay_ms);
+	ret = kstrtoint(buf, 10, &delay_ms);
 	if (delay_ms == 0) {
-		err("set delay_ms=%ld\n", delay_ms);
+		err("set delay_ms=%d\n", delay_ms);
 		return count;
 	}
 	mutex_lock(&data->work_mutex);
@@ -895,10 +897,11 @@
 {
 	struct vl_data *data = dev_get_drvdata(dev);
 	int timingBudget;
+	int ret;
 
-	kstrtoint(buf, 10, &timingBudget);
+	ret = kstrtoint(buf, 10, &timingBudget);
 	if (timingBudget == 0) {
-		err("set timingBudget=%ld\n", timingBudget);
+		err("set timingBudget=%d\n", timingBudget);
 		return count;
 	}
 	mutex_lock(&data->work_mutex);
@@ -929,10 +932,11 @@
 {
 	struct vl_data *data = dev_get_drvdata(dev);
 	int useLongRange;
+	int ret;
 
-	kstrtoint(buf, 10, &useLongRange);
+	ret = kstrtoint(buf, 10, &useLongRange);
 	if ((useLongRange != 0) &&  (useLongRange != 1)) {
-		err("set useLongRange=%ld\n", useLongRange);
+		err("set useLongRange=%d\n", useLongRange);
 		return count;
 	}
 
@@ -960,7 +964,6 @@
 	struct VL_RangingMeasurementData_t Measure;
 
 	papi_func_tbl->PerformSingleRangingMeasurement(data, &Measure);
-	dbg("Measure = %d\n", Measure.RangeMilliMeter);
 	return snprintf(buf, 4, "%d\n", Measure.RangeMilliMeter);
 }
 
@@ -972,11 +975,7 @@
 static ssize_t stmvl53l0x_show_xtalk(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct vl_data *data = dev_get_drvdata(dev);
-	struct VL_RangingMeasurementData_t Measure;
-
-	dbg("Measure = %d\n", Measure.RangeMilliMeter);
-	return snprintf(buf, 4, "%d\n", Measure.RangeMilliMeter);
+	return 0;
 }
 
 static ssize_t stmvl53l0x_set_xtalk(struct device *dev,
@@ -985,8 +984,9 @@
 {
 	struct vl_data *data = dev_get_drvdata(dev);
 	unsigned int targetDistance;
+	int ret;
 
-	kstrtoint(buf, 10, &targetDistance);
+	ret = kstrtoint(buf, 10, &targetDistance);
 	data->xtalkCalDistance = targetDistance;
 	stmvl53l0x_start(data, 3, XTALKCALIB_MODE);
 	return count;
@@ -1004,7 +1004,6 @@
 	struct VL_RangingMeasurementData_t Measure;
 
 	papi_func_tbl->PerformSingleRangingMeasurement(data, &Measure);
-	dbg("Measure = %d\n", Measure.RangeMilliMeter);
 	return snprintf(buf, 4, "%d\n", Measure.RangeMilliMeter);
 }
 
@@ -1014,8 +1013,9 @@
 {
 	struct vl_data *data = dev_get_drvdata(dev);
 	unsigned int targetDistance;
+	int ret;
 
-	kstrtoint(buf, 10, &targetDistance);
+	ret = kstrtoint(buf, 10, &targetDistance);
 	data->offsetCalDistance = targetDistance;
 	stmvl53l0x_start(data, 3, OFFSETCALIB_MODE);
 	return count;
@@ -1026,6 +1026,109 @@
 				   stmvl53l0x_show_offset,
 				   stmvl53l0x_set_offset);
 
+static ssize_t stmvl53l0x_set_spad(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	stmvl53l0x_start(data, 3, SPADCALIB_MODE);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(spad_cal, 0660/*S_IWUGO | S_IRUGO*/,
+					NULL,
+					stmvl53l0x_set_spad);
+
+static ssize_t stmvl53l0x_set_ref(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+
+	stmvl53l0x_start(data, 3, REFCALIB_MODE);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(ref_cal, 0660/*S_IWUGO | S_IRUGO*/,
+					NULL,
+					stmvl53l0x_set_ref);
+
+
+static ssize_t stmvl53l0x_show_OffsetCalibrationData(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int32_t offset_calibration_data;
+
+	papi_func_tbl->GetOffsetCalibrationDataMicroMeter(data,
+					&offset_calibration_data);
+	dbg("GetOffsetCalibrationDataMicroMeter = %d\n",
+					offset_calibration_data);
+	return snprintf(buf, 2, "%d\n",
+			offset_calibration_data);
+}
+
+static ssize_t stmvl53l0x_set_OffsetCalibrationData(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int32_t offset_calibration_data;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &offset_calibration_data);
+	papi_func_tbl->SetOffsetCalibrationDataMicroMeter(data,
+			offset_calibration_data);
+	return count;
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_offsetdata, 0660/*S_IWUGO | S_IRUGO*/,
+					stmvl53l0x_show_OffsetCalibrationData,
+					stmvl53l0x_set_OffsetCalibrationData);
+
+static ssize_t stmvl53l0x_show_XTalkCompensationRateMegaCps(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	int32_t xtalk_compensation_rate;
+
+	papi_func_tbl->GetXTalkCompensationRateMegaCps(data,
+					&xtalk_compensation_rate);
+	dbg("xtalk_compensation_rate = %d\n",
+					xtalk_compensation_rate);
+	return snprintf(buf, 2, "%d\n", xtalk_compensation_rate);
+
+}
+
+static ssize_t stmvl53l0x_set_XTalkCompensationRateMegaCps(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct vl_data *data = dev_get_drvdata(dev);
+	unsigned int xtalk_compensation_rate;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &xtalk_compensation_rate);
+	papi_func_tbl->SetXTalkCompensationRateMegaCps(data,
+						xtalk_compensation_rate);
+	return count;
+
+}
+
+/* DEVICE_ATTR(name,mode,show,store) */
+static DEVICE_ATTR(set_xtalkdata, 0660/*S_IWUGO | S_IRUGO*/,
+				stmvl53l0x_show_XTalkCompensationRateMegaCps,
+				stmvl53l0x_set_XTalkCompensationRateMegaCps);
+
+
+
 static struct attribute *stmvl53l0x_attributes[] = {
 	&dev_attr_enable_ps_sensor.attr,
 	&dev_attr_enable_debug.attr,
@@ -1035,6 +1138,10 @@
 	&dev_attr_show_meter.attr,
 	&dev_attr_xtalk_cal.attr,
 	&dev_attr_offset_cal.attr,
+	&dev_attr_spad_cal.attr,
+	&dev_attr_ref_cal.attr,
+	&dev_attr_set_offsetdata.attr,
+	&dev_attr_set_xtalkdata.attr,
 	NULL,
 };
 
@@ -1568,13 +1675,36 @@
 		dbg("Offset calibration:%u\n", OffsetMicroMeter);
 		return rc;
 	} else if (mode == XTALKCALIB_MODE) {
-		unsigned int XTalkCompensationRateMegaCps;
+		unsigned int xtalk_compensation_rate_mega_cps;
 		/*caltarget distance : 100mm and convert to */
 		/* fixed point 16 16 format */
 		papi_func_tbl->PerformXTalkCalibration(vl53l0x_dev,
 			(data->xtalkCalDistance<<16),
-			&XTalkCompensationRateMegaCps);
-		dbg("Xtalk calibration:%u\n", XTalkCompensationRateMegaCps);
+			&xtalk_compensation_rate_mega_cps);
+		dbg("Xtalk calibration:%u\n", xtalk_compensation_rate_mega_cps);
+		return rc;
+	} else if (mode == SPADCALIB_MODE) {
+		uint32_t ref_spad_count;
+		uint8_t is_aperture_spads;
+
+		papi_func_tbl->PerformRefSpadManagement(
+			vl53l0x_dev,
+			&ref_spad_count,
+			&is_aperture_spads);
+		dbg("SPAD calibration:%d,%d\n", ref_spad_count,
+					(unsigned int)is_aperture_spads);
+		return rc;
+	} else if (mode == REFCALIB_MODE) {
+		uint8_t vhv_settings;
+		uint8_t phase_cal;
+
+		papi_func_tbl->PerformRefCalibration(
+			vl53l0x_dev,
+			&vhv_settings,
+			&phase_cal);
+		dbg("Ref calibration:%u,%u\n",
+		(unsigned int)vhv_settings,
+		(unsigned int)phase_cal);
 		return rc;
 	}
 	/* set up device parameters */
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index bee2674..5cbf17a 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -937,6 +937,21 @@
 	psmouse->pt_deactivate = NULL;
 }
 
+static bool psmouse_do_detect(int (*detect)(struct psmouse *, bool),
+			      struct psmouse *psmouse, bool allow_passthrough,
+			      bool set_properties)
+{
+	if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
+	    !allow_passthrough) {
+		return false;
+	}
+
+	if (set_properties)
+		psmouse_apply_defaults(psmouse);
+
+	return detect(psmouse, set_properties) == 0;
+}
+
 static bool psmouse_try_protocol(struct psmouse *psmouse,
 				 enum psmouse_type type,
 				 unsigned int *max_proto,
@@ -948,15 +963,8 @@
 	if (!proto)
 		return false;
 
-	if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
-	    !proto->try_passthru) {
-		return false;
-	}
-
-	if (set_properties)
-		psmouse_apply_defaults(psmouse);
-
-	if (proto->detect(psmouse, set_properties) != 0)
+	if (!psmouse_do_detect(proto->detect, psmouse, proto->try_passthru,
+			       set_properties))
 		return false;
 
 	if (set_properties && proto->init && init_allowed) {
@@ -988,8 +996,8 @@
 	 * Always check for focaltech, this is safe as it uses pnp-id
 	 * matching.
 	 */
-	if (psmouse_try_protocol(psmouse, PSMOUSE_FOCALTECH,
-				 &max_proto, set_properties, false)) {
+	if (psmouse_do_detect(focaltech_detect,
+			      psmouse, false, set_properties)) {
 		if (max_proto > PSMOUSE_IMEX &&
 		    IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH) &&
 		    (!set_properties || focaltech_init(psmouse) == 0)) {
@@ -1035,8 +1043,8 @@
 	 * probing for IntelliMouse.
 	 */
 	if (max_proto > PSMOUSE_PS2 &&
-	    psmouse_try_protocol(psmouse, PSMOUSE_SYNAPTICS, &max_proto,
-				 set_properties, false)) {
+	    psmouse_do_detect(synaptics_detect,
+			      psmouse, false, set_properties)) {
 		synaptics_hardware = true;
 
 		if (max_proto > PSMOUSE_IMEX) {
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index e5d185f..2613240 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3028,6 +3028,15 @@
 		.driver_data = samus_platform_data,
 	},
 	{
+		/* Samsung Chromebook Pro */
+		.ident = "Samsung Chromebook Pro",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+		},
+		.driver_data = samus_platform_data,
+	},
+	{
 		/* Other Google Chromebooks */
 		.ident = "Chromebook",
 		.matches = {
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
index 21a9e8f..21236e9 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
@@ -113,6 +113,13 @@
 #define F12_WAKEUP_GESTURE_MODE 0x02
 #define F12_UDG_DETECT 0x0f
 
+#define PWR_VTG_MIN_UV		2700000
+#define PWR_VTG_MAX_UV		3600000
+#define PWR_ACTIVE_LOAD_UA	2000
+#define I2C_VTG_MIN_UV		1710000
+#define I2C_VTG_MAX_UV		2000000
+#define I2C_ACTIVE_LOAD_UA	7000
+
 static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
 		bool *was_in_bl_mode);
 static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
@@ -3407,6 +3414,66 @@
 	return retval;
 }
 
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+	return (regulator_count_voltages(reg) > 0) ?
+		regulator_set_load(reg, load_uA) : 0;
+}
+
+static int synaptics_rmi4_configure_reg(struct synaptics_rmi4_data *rmi4_data,
+				bool on)
+{
+	int retval;
+
+	if (on == false)
+		goto hw_shutdown;
+
+	if (rmi4_data->pwr_reg) {
+		if (regulator_count_voltages(rmi4_data->pwr_reg) > 0) {
+			retval = regulator_set_voltage(rmi4_data->pwr_reg,
+				PWR_VTG_MIN_UV, PWR_VTG_MAX_UV);
+			if (retval) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"regulator set_vtg failed retval =%d\n",
+					retval);
+				goto err_set_vtg_pwr;
+			}
+		}
+	}
+
+	if (rmi4_data->bus_reg) {
+		if (regulator_count_voltages(rmi4_data->bus_reg) > 0) {
+			retval = regulator_set_voltage(rmi4_data->bus_reg,
+				I2C_VTG_MIN_UV, I2C_VTG_MAX_UV);
+			if (retval) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"regulator set_vtg failed retval =%d\n",
+					retval);
+				goto err_set_vtg_bus;
+			}
+		}
+	}
+
+	return 0;
+
+err_set_vtg_bus:
+	if (rmi4_data->pwr_reg &&
+		regulator_count_voltages(rmi4_data->pwr_reg) > 0)
+		regulator_set_voltage(rmi4_data->pwr_reg, 0, PWR_VTG_MAX_UV);
+err_set_vtg_pwr:
+	return retval;
+
+hw_shutdown:
+	if (rmi4_data->pwr_reg &&
+		regulator_count_voltages(rmi4_data->pwr_reg) > 0)
+		regulator_set_voltage(rmi4_data->pwr_reg, 0, PWR_VTG_MAX_UV);
+	if (rmi4_data->bus_reg &&
+		regulator_count_voltages(rmi4_data->bus_reg) > 0)
+		regulator_set_voltage(rmi4_data->bus_reg, 0, I2C_VTG_MAX_UV);
+
+	return 0;
+}
+
 static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
 		bool get)
 {
@@ -3472,37 +3539,66 @@
 	}
 
 	if (rmi4_data->bus_reg) {
+		retval = reg_set_optimum_mode_check(rmi4_data->bus_reg,
+					I2C_ACTIVE_LOAD_UA);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Regulator set_opt failed rc=%d\n",
+					__func__, retval);
+			return retval;
+		}
+
 		retval = regulator_enable(rmi4_data->bus_reg);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to enable bus pullup regulator\n",
 					__func__);
-			goto exit;
+			goto err_bus_reg_en;
 		}
 	}
 
 	if (rmi4_data->pwr_reg) {
+		retval = reg_set_optimum_mode_check(rmi4_data->pwr_reg,
+					PWR_ACTIVE_LOAD_UA);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Regulator set_opt failed rc=%d\n",
+					__func__, retval);
+			goto disable_bus_reg;
+		}
+
 		retval = regulator_enable(rmi4_data->pwr_reg);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to enable power regulator\n",
 					__func__);
-			goto disable_bus_reg;
+			goto err_pwr_reg_en;
 		}
 		msleep(bdata->power_delay_ms);
 	}
 
 	return 0;
 
+err_pwr_reg_en:
+	reg_set_optimum_mode_check(rmi4_data->pwr_reg, 0);
+	goto disable_bus_reg;
+err_bus_reg_en:
+	reg_set_optimum_mode_check(rmi4_data->bus_reg, 0);
+
+	return retval;
+
 disable_pwr_reg:
-	if (rmi4_data->pwr_reg)
+	if (rmi4_data->pwr_reg) {
+		reg_set_optimum_mode_check(rmi4_data->pwr_reg, 0);
 		regulator_disable(rmi4_data->pwr_reg);
+	}
 
 disable_bus_reg:
-	if (rmi4_data->bus_reg)
+	if (rmi4_data->bus_reg) {
+		reg_set_optimum_mode_check(rmi4_data->bus_reg, 0);
 		regulator_disable(rmi4_data->bus_reg);
+	}
 
-exit:
 	return retval;
 }
 
@@ -3976,6 +4072,14 @@
 		goto err_get_reg;
 	}
 
+	retval = synaptics_rmi4_configure_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to configure regulators\n",
+				__func__);
+		goto err_configure_reg;
+	}
+
 	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
 	if (retval < 0) {
 		dev_err(&pdev->dev,
@@ -4202,6 +4306,8 @@
 err_enable_reg:
 	synaptics_rmi4_get_reg(rmi4_data, false);
 
+err_configure_reg:
+	synaptics_rmi4_configure_reg(rmi4_data, false);
 err_get_reg:
 	kfree(rmi4_data);
 
@@ -4284,6 +4390,7 @@
 	}
 
 	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_configure_reg(rmi4_data, false);
 	synaptics_rmi4_get_reg(rmi4_data, false);
 
 	kfree(rmi4_data);
@@ -4557,6 +4664,7 @@
 	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
 	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
 	int retval;
+	int lpm_uA;
 
 	if (rmi4_data->stay_awake)
 		return 0;
@@ -4565,6 +4673,17 @@
 
 	if (rmi4_data->enable_wakeup_gesture) {
 		if (!rmi4_data->suspend) {
+			/* Set lpm current for bus regulator */
+			lpm_uA = rmi4_data->hw_if->board_data->bus_lpm_cur_uA;
+			if (lpm_uA) {
+				retval = reg_set_optimum_mode_check(
+						rmi4_data->bus_reg, lpm_uA);
+				if (retval < 0)
+					dev_err(dev,
+					"Bus Regulator set_opt failed rc=%d\n",
+					retval);
+			}
+
 			synaptics_rmi4_wakeup_gesture(rmi4_data, true);
 			enable_irq_wake(rmi4_data->irq);
 		}
@@ -4594,7 +4713,8 @@
 	}
 	mutex_unlock(&exp_data.mutex);
 
-	if (!rmi4_data->suspend && !rmi4_data->enable_wakeup_gesture)
+	if (!rmi4_data->suspend && !rmi4_data->enable_wakeup_gesture &&
+			!rmi4_data->hw_if->board_data->dont_disable_regs)
 		synaptics_rmi4_enable_reg(rmi4_data, false);
 
 	rmi4_data->suspend = true;
@@ -4623,6 +4743,18 @@
 
 	if (rmi4_data->enable_wakeup_gesture) {
 		if (rmi4_data->suspend) {
+			/* Set active current for the bus regulator */
+			if (rmi4_data->hw_if->board_data->bus_lpm_cur_uA) {
+				retval = reg_set_optimum_mode_check(
+						rmi4_data->bus_reg,
+						I2C_ACTIVE_LOAD_UA);
+				if (retval < 0)
+					dev_err(dev,
+					"Pwr regulator set_opt failed rc=%d\n",
+					retval);
+			}
+
+
 			synaptics_rmi4_wakeup_gesture(rmi4_data, false);
 			disable_irq_wake(rmi4_data->irq);
 		}
@@ -4631,7 +4763,8 @@
 
 	rmi4_data->current_page = MASK_8BIT;
 
-	if (rmi4_data->suspend)
+	if (rmi4_data->suspend &&
+			!rmi4_data->hw_if->board_data->dont_disable_regs)
 		synaptics_rmi4_enable_reg(rmi4_data, true);
 
 	synaptics_rmi4_sleep_enable(rmi4_data, false);
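The regulator hunks above follow the usual consumer sequence: only supplies that actually implement voltage control (regulator_count_voltages() > 0) get regulator_set_voltage()/regulator_set_load() calls, the load and voltage are configured before regulator_enable(), and each error path unwinds whatever was already applied. A compressed sketch of that ordering, with an illustrative helper name rather than the driver's:

#include <linux/regulator/consumer.h>

static int touch_supply_on(struct regulator *reg, int min_uv, int max_uv,
			   int load_ua)
{
	int rc;

	if (regulator_count_voltages(reg) > 0) {
		rc = regulator_set_voltage(reg, min_uv, max_uv);
		if (rc)
			return rc;

		rc = regulator_set_load(reg, load_ua);
		if (rc < 0)
			return rc;
	}

	rc = regulator_enable(reg);
	if (rc && regulator_count_voltages(reg) > 0)
		regulator_set_voltage(reg, 0, max_uv);	/* unwind on failure */

	return rc;
}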
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
index f634f17..e078853 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
@@ -84,6 +84,9 @@
 	bdata->wakeup_gesture_en = of_property_read_bool(np,
 			"synaptics,wakeup-gestures-en");
 
+	bdata->dont_disable_regs = of_property_read_bool(np,
+			"synaptics,do-not-disable-regulators");
+
 	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
 	if (retval < 0)
 		bdata->pwr_reg_name = NULL;
@@ -184,6 +187,10 @@
 		bdata->max_y_for_2d = -1;
 	}
 
+	retval = of_property_read_u32(np, "synaptics,bus-lpm-cur-uA",
+			&value);
+	bdata->bus_lpm_cur_uA = retval < 0 ? 0 : value;
+
 	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
 	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
 	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d52b534..c1a07a5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -60,7 +60,7 @@
 
 config IOMMU_IO_PGTABLE_FAST
 	bool "Fast ARMv7/v8 Long Descriptor Format"
-	select IOMMU_IO_PGTABLE
+	depends on ARM64_DMA_USE_IOMMU || ARM_DMA_USE_IOMMU
 	help
           Enable support for a subset of the ARM long descriptor pagetable
 	  format.  This allocator achieves fast performance by
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1719336..c3376df 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -579,7 +579,6 @@
 	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
 	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
 	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
-	{ ARM_SMMU_OPT_HIBERNATION, "qcom,hibernation-support"},
 	{ 0, NULL},
 };
 
@@ -607,6 +606,7 @@
 static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
 static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
 static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu);
 
 static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			       phys_addr_t paddr, size_t size, int prot);
@@ -635,6 +635,13 @@
 				arm_smmu_options[i].prop);
 		}
 	} while (arm_smmu_options[++i].opt);
+
+	if (arm_smmu_opt_hibernation(smmu) &&
+	    smmu->options & ARM_SMMU_OPT_SKIP_INIT) {
+		dev_info(smmu->dev,
+			 "Disabling incompatible option: skip-init\n");
+		smmu->options &= ~ARM_SMMU_OPT_SKIP_INIT;
+	}
 }
 
 static bool is_dynamic_domain(struct iommu_domain *domain)
@@ -707,7 +714,7 @@
 
 static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu)
 {
-	return smmu->options & ARM_SMMU_OPT_HIBERNATION;
+	return IS_ENABLED(CONFIG_HIBERNATION);
 }
 
 /*
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 88bbc8c..1612d3a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1612,8 +1612,7 @@
 	 * flush. However, device IOTLB doesn't need to be flushed in this case.
 	 */
 	if (!cap_caching_mode(iommu->cap) || !map)
-		iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
-				      addr, mask);
+		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index aee1c60..cc58b1b 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -133,6 +133,8 @@
 
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 470b4aa..e4768fc 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -80,6 +80,8 @@
 
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ac15e5d..558c758 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1807,6 +1807,8 @@
 
 	for (np = of_find_matching_node(node, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller")) {
 			pr_warn("%s: no msi-controller property, ITS ignored\n",
 				np->full_name);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2f0f448..cb839bd 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,34 @@
 
 #include "irq-gic-common.h"
 
+#define MAX_IRQ			1020U	/* Max number of SGI+PPI+SPI */
+#define SPI_START_IRQ		32	/* SPI start irq number */
+#define GICD_ICFGR_BITS		2	/* 2 bits per irq in GICD_ICFGR */
+#define GICD_ISENABLER_BITS	1	/* 1 bit per irq in GICD_ISENABLER */
+#define GICD_IPRIORITYR_BITS	8	/* 8 bits per irq in GICD_IPRIORITYR */
+
+/* 32 bit mask with lower n bits set */
+#define UMASK_LOW(n)		(~0U >> (32 - (n)))
+
+/* Number of 32-bit words required to cover all irqs, for
+ * registers where each word stores the configuration of
+ * 32 / bits_per_irq irqs (bits_per_irq bits per irq).
+ */
+#define NUM_IRQ_WORDS(bits_per_irq)	(DIV_ROUND_UP(MAX_IRQ, \
+						      32 / (bits_per_irq)))
+#define MAX_IRQS_IGNORE		10
+
+#define IRQ_NR_BOUND(nr)	min((nr), MAX_IRQ)
+
+/* Bitmap of irqs which need to be restored */
+static DECLARE_BITMAP(irqs_restore, MAX_IRQ);
+
+/* Bitmap of irqs for which restore is ignored.
+ * Presently, only GICD_IROUTER mismatches are
+ * ignored.
+ */
+static DECLARE_BITMAP(irqs_ignore_restore, MAX_IRQ);
+
 struct redist_region {
 	void __iomem		*redist_base;
 	phys_addr_t		phys_base;
@@ -60,6 +88,16 @@
 	u32			nr_redist_regions;
 	unsigned int		irq_nr;
 	struct partition_desc	*ppi_descs[16];
+
+	u64 saved_spi_router[MAX_IRQ];
+	u32 saved_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+	u32 saved_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+	u32 saved_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
+
+	u64 changed_spi_router[MAX_IRQ];
+	u32 changed_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+	u32 changed_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+	u32 changed_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
 };
 
 static struct gic_chip_data gic_data __read_mostly;
@@ -67,6 +105,58 @@
 
 static struct gic_kvm_info gic_v3_kvm_info;
 
+enum gicd_save_restore_reg {
+	SAVED_ICFGR,
+	SAVED_IS_ENABLER,
+	SAVED_IPRIORITYR,
+	NUM_SAVED_GICD_REGS,
+};
+
+/* Stores start address of spi config for saved gicd regs */
+static u32 *saved_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = gic_data.saved_spi_cfg,
+	[SAVED_IS_ENABLER] = gic_data.saved_spi_enable,
+	[SAVED_IPRIORITYR] = gic_data.saved_spi_priority,
+};
+
+/* Stores start address of spi config for changed gicd regs */
+static u32 *changed_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = gic_data.changed_spi_cfg,
+	[SAVED_IS_ENABLER] = gic_data.changed_spi_enable,
+	[SAVED_IPRIORITYR] = gic_data.changed_spi_priority,
+};
+
+/* GICD offset for saved registers */
+static u32 gicd_offset[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = GICD_ICFGR,
+	[SAVED_IS_ENABLER] = GICD_ISENABLER,
+	[SAVED_IPRIORITYR] = GICD_IPRIORITYR,
+};
+
+/* Bits per irq word, for gicd saved registers */
+static u32 gicd_reg_bits_per_irq[NUM_SAVED_GICD_REGS] = {
+	[SAVED_ICFGR] = GICD_ICFGR_BITS,
+	[SAVED_IS_ENABLER] = GICD_ISENABLER_BITS,
+	[SAVED_IPRIORITYR] = GICD_IPRIORITYR_BITS,
+};
+
+#define for_each_spi_irq_word(i, reg) \
+	for (i = 0; \
+	    i < DIV_ROUND_UP(IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ, \
+			     32 / gicd_reg_bits_per_irq[reg]); \
+	    i++)
+
+#define read_spi_word_offset(base, reg, i) \
+	readl_relaxed_no_log(	\
+			base + gicd_offset[reg] + i * 4 +	\
+			SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
+#define restore_spi_word_offset(base, reg, i) \
+	writel_relaxed_no_log(	\
+			saved_spi_regs_start[reg][i],\
+			base + gicd_offset[reg] + i * 4 +	\
+			SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
 #define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
@@ -134,6 +224,229 @@
 }
 #endif
 
+void gic_v3_dist_save(void)
+{
+	void __iomem *base = gic_data.dist_base;
+	int reg, i;
+
+	for (reg = SAVED_ICFGR; reg < NUM_SAVED_GICD_REGS; reg++) {
+		for_each_spi_irq_word(i, reg) {
+			saved_spi_regs_start[reg][i] =
+				read_spi_word_offset(base, reg, i);
+		}
+	}
+
+	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++)
+		gic_data.saved_spi_router[i] =
+			gic_read_irouter(base + GICD_IROUTER + i * 8);
+}
+
+static void _gicd_check_reg(enum gicd_save_restore_reg reg)
+{
+	void __iomem *base = gic_data.dist_base;
+	u32 *saved_spi_cfg = saved_spi_regs_start[reg];
+	u32 *changed_spi_cfg = changed_spi_regs_start[reg];
+	u32 bits_per_irq = gicd_reg_bits_per_irq[reg];
+	u32 current_cfg = 0;
+	int i, j = SPI_START_IRQ, l;
+	u32 k;
+
+	for_each_spi_irq_word(i, reg) {
+		current_cfg = read_spi_word_offset(base, reg, i);
+		if (current_cfg != saved_spi_cfg[i]) {
+			for (k = current_cfg ^ saved_spi_cfg[i],
+			     l = 0; k ; k >>= bits_per_irq, l++) {
+				if (k & UMASK_LOW(bits_per_irq))
+					set_bit(j+l, irqs_restore);
+			}
+			changed_spi_cfg[i] = current_cfg ^ saved_spi_cfg[i];
+		}
+		j += 32 / bits_per_irq;
+	}
+}
+
+#define _gic_v3_dist_check_icfgr()	\
+		_gicd_check_reg(SAVED_ICFGR)
+#define _gic_v3_dist_check_ipriorityr()	\
+		_gicd_check_reg(SAVED_IPRIORITYR)
+#define _gic_v3_dist_check_isenabler()	\
+		_gicd_check_reg(SAVED_IS_ENABLER)
+
+static void _gic_v3_dist_check_irouter(void)
+{
+	void __iomem *base = gic_data.dist_base;
+	u64 current_irouter_cfg = 0;
+	int i;
+
+	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+		if (test_bit(i, irqs_ignore_restore))
+			continue;
+		current_irouter_cfg = gic_read_irouter(
+					base + GICD_IROUTER + i * 8);
+		if (current_irouter_cfg != gic_data.saved_spi_router[i]) {
+			set_bit(i, irqs_restore);
+			gic_data.changed_spi_router[i] =
+			    current_irouter_cfg ^ gic_data.saved_spi_router[i];
+		}
+	}
+}
+
+static void _gic_v3_dist_restore_reg(enum gicd_save_restore_reg reg)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i;
+
+	for_each_spi_irq_word(i, reg) {
+		if (changed_spi_regs_start[reg][i])
+			restore_spi_word_offset(base, reg, i);
+	}
+
+	/* Commit all restored configurations before subsequent writes */
+	wmb();
+}
+
+#define _gic_v3_dist_restore_icfgr()	_gic_v3_dist_restore_reg(SAVED_ICFGR)
+#define _gic_v3_dist_restore_ipriorityr()		\
+		_gic_v3_dist_restore_reg(SAVED_IPRIORITYR)
+
+static void _gic_v3_dist_restore_set_reg(u32 offset)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i, j = SPI_START_IRQ, l;
+	int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+	for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+		u32 reg_val = readl_relaxed_no_log(base + offset + i * 4 + 4);
+		bool irqs_restore_updated = 0;
+
+		for (l = 0; l < 32; l++) {
+			if (test_bit(j+l, irqs_restore)) {
+				reg_val |= BIT(l);
+				irqs_restore_updated = 1;
+			}
+		}
+
+		if (irqs_restore_updated) {
+			writel_relaxed_no_log(
+				reg_val, base + offset + i * 4 + 4);
+		}
+	}
+
+	/* Commit restored configuration updates before subsequent writes */
+	wmb();
+}
+
+#define _gic_v3_dist_restore_isenabler()		\
+		_gic_v3_dist_restore_set_reg(GICD_ISENABLER)
+
+#define _gic_v3_dist_restore_ispending()		\
+		_gic_v3_dist_restore_set_reg(GICD_ISPENDR)
+
+static void _gic_v3_dist_restore_irouter(void)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i;
+
+	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+		if (test_bit(i, irqs_ignore_restore))
+			continue;
+		if (gic_data.changed_spi_router[i]) {
+			gic_write_irouter(gic_data.saved_spi_router[i],
+						base + GICD_IROUTER + i * 8);
+		}
+	}
+
+	/* Commit GICD_IROUTER writes before subsequent writes */
+	wmb();
+}
+
+static void _gic_v3_dist_clear_reg(u32 offset)
+{
+	void __iomem *base = gic_data.dist_base;
+	int i, j = SPI_START_IRQ, l;
+	int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+	for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+		u32 clear = 0;
+		bool irqs_restore_updated = 0;
+
+		for (l = 0; l < 32; l++) {
+			if (test_bit(j+l, irqs_restore)) {
+				clear |= BIT(l);
+				irqs_restore_updated = 1;
+			}
+		}
+
+		if (irqs_restore_updated) {
+			writel_relaxed_no_log(
+				clear, base + offset + i * 4 + 4);
+		}
+	}
+
+	/* Commit clearing of irq config before subsequent writes */
+	wmb();
+}
+
+#define _gic_v3_dist_set_icenabler()		\
+		_gic_v3_dist_clear_reg(GICD_ICENABLER)
+
+#define _gic_v3_dist_set_icpending()		\
+		_gic_v3_dist_clear_reg(GICD_ICPENDR)
+
+#define _gic_v3_dist_set_icactive()		\
+		_gic_v3_dist_clear_reg(GICD_ICACTIVER)
+
+/* Restore GICD state for SPIs. SPI configuration is restored
+ * for GICD_ICFGR, GICD_ISENABLER, GICD_IPRIORITYR, GICD_IROUTER
+ * registers. Following is the sequence for restore:
+ *
+ * 1. For SPIs, check whether any of GICD_ICFGR, GICD_ISENABLER,
+ *    GICD_IPRIORITYR, GICD_IROUTER, current configuration is
+ *    different from saved configuration.
+ *
+ * For all irqs with mismatched configurations:
+ *
+ * 2. Set GICD_ICENABLER and wait for its completion.
+ *
+ * 3. Restore any changed GICD_ICFGR, GICD_IPRIORITYR, GICD_IROUTER
+ *    configurations.
+ *
+ * 4. Set GICD_ICACTIVER.
+ *
+ * 5. Set pending for the interrupt.
+ *
+ * 6. Enable interrupt and wait for its completion.
+ *
+ */
+void gic_v3_dist_restore(void)
+{
+	_gic_v3_dist_check_icfgr();
+	_gic_v3_dist_check_ipriorityr();
+	_gic_v3_dist_check_isenabler();
+	_gic_v3_dist_check_irouter();
+
+	if (bitmap_empty(irqs_restore, IRQ_NR_BOUND(gic_data.irq_nr)))
+		return;
+
+	_gic_v3_dist_set_icenabler();
+	gic_dist_wait_for_rwp();
+
+	_gic_v3_dist_restore_icfgr();
+	_gic_v3_dist_restore_ipriorityr();
+	_gic_v3_dist_restore_irouter();
+
+	_gic_v3_dist_set_icactive();
+
+	_gic_v3_dist_set_icpending();
+	_gic_v3_dist_restore_ispending();
+
+	_gic_v3_dist_restore_isenabler();
+	gic_dist_wait_for_rwp();
+
+	/* Commit all writes before proceeding */
+	wmb();
+}
+
 /*
  * gic_show_pending_irq - Shows the pending interrupts
  * Note: Interrupts should be disabled on the cpu from which
@@ -706,7 +1019,7 @@
 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
 	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
 
-	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
 	gic_write_sgi1r(val);
 }
 
@@ -1244,7 +1557,8 @@
 	struct redist_region *rdist_regs;
 	u64 redist_stride;
 	u32 nr_redist_regions;
-	int err, i;
+	int err, i, ignore_irqs_len;
+	u32 ignore_restore_irqs[MAX_IRQS_IGNORE] = {0};
 
 	dist_base = of_iomap(node, 0);
 	if (!dist_base) {
@@ -1294,6 +1608,14 @@
 
 	gic_populate_ppi_partitions(node);
 	gic_of_setup_kvm_info(node);
+
+	ignore_irqs_len = of_property_read_variable_u32_array(node,
+				"ignored-save-restore-irqs",
+				ignore_restore_irqs,
+				0, MAX_IRQS_IGNORE);
+	for (i = 0; i < ignore_irqs_len; i++)
+		set_bit(ignore_restore_irqs[i], irqs_ignore_restore);
+
 	return 0;
 
 out_unmap_rdist:
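The save/restore code above packs per-interrupt GICD state into 32-bit words: GICD_ICFGR uses 2 bits per irq, GICD_ISENABLER 1 bit and GICD_IPRIORITYR 8 bits. The word index and bit position of a given irq follow directly from bits_per_irq; a small sketch of that indexing (helper name is illustrative, and unlike the driver it does not offset by SPI_START_IRQ):

#include <linux/types.h>

/* Extract one irq's field from an array of packed 32-bit register words. */
static u32 gicd_irq_field(const u32 *regs, unsigned int irq,
			  unsigned int bits_per_irq)
{
	unsigned int per_word = 32 / bits_per_irq;	/* irqs per register word */
	u32 mask = ~0U >> (32 - bits_per_irq);		/* same idea as UMASK_LOW() */

	return (regs[irq / per_word] >> ((irq % per_word) * bits_per_irq)) & mask;
}

For example, with 2 bits per irq (GICD_ICFGR) irq 35 sits in word 2, bits [7:6]; with 8 bits per irq (GICD_IPRIORITYR) it sits in word 8, bits [31:24].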
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 8cb3265..2522a3d 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -56,6 +56,7 @@
 	if (state == LED_OFF && !(led_cdev->flags & LED_KEEP_TRIGGER))
 		led_trigger_remove(led_cdev);
 	led_set_brightness(led_cdev, state);
+	led_cdev->usr_brightness_req = state;
 
 	ret = size;
 unlock:
@@ -71,7 +72,24 @@
 
 	return sprintf(buf, "%u\n", led_cdev->max_brightness);
 }
-static DEVICE_ATTR_RO(max_brightness);
+
+static ssize_t max_brightness_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	unsigned long state;
+	ssize_t ret = -EINVAL;
+
+	ret = kstrtoul(buf, 10, &state);
+	if (ret)
+		return ret;
+
+	led_cdev->max_brightness = state;
+	led_set_brightness(led_cdev, led_cdev->usr_brightness_req);
+
+	return size;
+}
+static DEVICE_ATTR_RW(max_brightness);
 
 #ifdef CONFIG_LEDS_TRIGGERS
 static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 759d853..d773ec5 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -123,6 +123,7 @@
 
 #define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
 #define	LED3_FLASH_ONCE_ONLY_BIT		BIT(1)
+#define LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
 
 #define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
 #define	LPG_INPUT_SEL_BIT			BIT(0)
@@ -425,7 +426,7 @@
 static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
 {
 	int rc, i, addr_offset;
-	u8 val = 0, mask;
+	u8 val = 0, mask, strobe_mask = 0, strobe_ctrl;
 
 	for (i = 0; i < led->num_fnodes; i++) {
 		addr_offset = led->fnode[i].id;
@@ -436,6 +437,51 @@
 			return rc;
 
 		val |= 0x1 << led->fnode[i].id;
+
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
+			if (led->fnode[i].id == LED3)
+				strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+			else
+				strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT;
+		}
+
+		if (led->fnode[i].id == LED3 &&
+				led->fnode[i].strobe_sel == LPG_STROBE)
+			strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+		/*
+		 * As per the hardware recommendation, to use LED2/LED3 in HW
+		 * strobe mode, LED1 should be set to HW strobe mode as well.
+		 */
+		if (led->fnode[i].strobe_sel == HW_STROBE &&
+		      (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) {
+			mask = FLASH_HW_STROBE_MASK;
+			addr_offset = led->fnode[LED1].id;
+			/*
+			 * HW_STROBE: enable, TRIGGER: level,
+			 * POLARITY: active high
+			 */
+			strobe_ctrl = BIT(2) | BIT(0);
+			rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_STROBE_CTRL(
+				led->base + addr_offset),
+				mask, strobe_ctrl);
+			if (rc < 0)
+				return rc;
+		}
+	}
+
+	rc = qpnp_flash_led_masked_write(led,
+		FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+		strobe_mask, 0);
+	if (rc < 0)
+		return rc;
+
+	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+		if (rc < 0)
+			return rc;
 	}
 
 	rc = qpnp_flash_led_write(led,
@@ -629,19 +675,6 @@
 			return rc;
 	}
 
-	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
-		rc = qpnp_flash_led_masked_write(led,
-			FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
-			LED3_FLASH_ONCE_ONLY_BIT, 0);
-		if (rc < 0)
-			return rc;
-
-		rc = qpnp_flash_led_masked_write(led,
-			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
-			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
-		if (rc < 0)
-			return rc;
-	}
 	return 0;
 }
 
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index 764657a..8a850c5 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -24,6 +24,7 @@
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/qpnp/qpnp-misc.h>
 #include <linux/qpnp/qpnp-revid.h>
@@ -321,6 +322,7 @@
 	int				sc_irq;
 	struct pwm_param		pwm_data;
 	struct hap_lra_ares_param	ares_cfg;
+	struct regulator		*vcc_pon;
 	u32				play_time_ms;
 	u32				max_play_time_ms;
 	u32				vmax_mv;
@@ -355,6 +357,7 @@
 	bool				lra_auto_mode;
 	bool				play_irq_en;
 	bool				auto_res_err_recovery_hw;
+	bool				vcc_pon_enabled;
 };
 
 static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip);
@@ -801,10 +804,29 @@
 
 	enable = atomic_read(&chip->state);
 	pr_debug("state: %d\n", enable);
+
+	if (chip->vcc_pon && enable && !chip->vcc_pon_enabled) {
+		rc = regulator_enable(chip->vcc_pon);
+		if (rc < 0)
+			pr_err("%s: could not enable vcc_pon regulator rc=%d\n",
+				 __func__, rc);
+		else
+			chip->vcc_pon_enabled = true;
+	}
+
 	rc = qpnp_haptics_play(chip, enable);
 	if (rc < 0)
 		pr_err("Error in %sing haptics, rc=%d\n",
 			enable ? "play" : "stopp", rc);
+
+	if (chip->vcc_pon && !enable && chip->vcc_pon_enabled) {
+		rc = regulator_disable(chip->vcc_pon);
+		if (rc)
+			pr_err("%s: could not disable vcc_pon regulator rc=%d\n",
+				 __func__, rc);
+		else
+			chip->vcc_pon_enabled = false;
+	}
 }
 
 static enum hrtimer_restart hap_stop_timer(struct hrtimer *timer)
@@ -2054,6 +2076,7 @@
 	struct device_node *revid_node, *misc_node;
 	const char *temp_str;
 	int rc, temp;
+	struct regulator *vcc_pon;
 
 	rc = of_property_read_u32(node, "reg", &temp);
 	if (rc < 0) {
@@ -2381,6 +2404,16 @@
 	else if (chip->play_mode == HAP_PWM)
 		rc = qpnp_haptics_parse_pwm_dt(chip);
 
+	if (of_find_property(node, "vcc_pon-supply", NULL)) {
+		vcc_pon = regulator_get(&chip->pdev->dev, "vcc_pon");
+		if (IS_ERR(vcc_pon)) {
+			rc = PTR_ERR(vcc_pon);
+			dev_err(&chip->pdev->dev,
+				"regulator get failed vcc_pon rc=%d\n", rc);
+		}
+		chip->vcc_pon = vcc_pon;
+	}
+
 	return rc;
 }
 
diff --git a/drivers/leds/leds-qpnp-vibrator-ldo.c b/drivers/leds/leds-qpnp-vibrator-ldo.c
index 6a14324..dd19dd1 100644
--- a/drivers/leds/leds-qpnp-vibrator-ldo.c
+++ b/drivers/leds/leds-qpnp-vibrator-ldo.c
@@ -65,9 +65,29 @@
 	bool			disable_overdrive;
 };
 
-static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV)
+static inline int qpnp_vib_ldo_poll_status(struct vib_ldo_chip *chip)
 {
 	unsigned int val;
+	int ret;
+
+	ret = regmap_read_poll_timeout(chip->regmap,
+			chip->base + QPNP_VIB_LDO_REG_STATUS1, val,
+			val & QPNP_VIB_LDO_VREG_READY, 100, 1000);
+	if (ret < 0) {
+		pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
+			val, ret);
+
+		/* Keep VIB_LDO disabled */
+		regmap_update_bits(chip->regmap,
+			chip->base + QPNP_VIB_LDO_REG_EN_CTL,
+			QPNP_VIB_LDO_EN, 0);
+	}
+
+	return ret;
+}
+
+static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV)
+{
 	u32 vlevel;
 	u8 reg[2];
 	int ret;
@@ -86,13 +106,9 @@
 	}
 
 	if (chip->vib_enabled) {
-		ret = regmap_read_poll_timeout(chip->regmap,
-					chip->base + QPNP_VIB_LDO_REG_STATUS1,
-					val, val & QPNP_VIB_LDO_VREG_READY,
-					100, 1000);
+		ret = qpnp_vib_ldo_poll_status(chip);
 		if (ret < 0) {
-			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
-				val, ret);
+			pr_err("Vibrator LDO status polling timedout\n");
 			return ret;
 		}
 	}
@@ -103,7 +119,6 @@
 
 static inline int qpnp_vib_ldo_enable(struct vib_ldo_chip *chip, bool enable)
 {
-	unsigned int val;
 	int ret;
 
 	if (chip->vib_enabled == enable)
@@ -120,13 +135,9 @@
 	}
 
 	if (enable) {
-		ret = regmap_read_poll_timeout(chip->regmap,
-					chip->base + QPNP_VIB_LDO_REG_STATUS1,
-					val, val & QPNP_VIB_LDO_VREG_READY,
-					100, 1000);
+		ret = qpnp_vib_ldo_poll_status(chip);
 		if (ret < 0) {
-			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
-				val, ret);
+			pr_err("Vibrator LDO status polling timedout\n");
 			return ret;
 		}
 	}
@@ -430,6 +441,7 @@
 	}
 	hrtimer_cancel(&chip->stop_timer);
 	cancel_work_sync(&chip->vib_work);
+	qpnp_vib_ldo_enable(chip, false);
 	mutex_unlock(&chip->lock);
 
 	return 0;
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index d2e576d..861d987 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2215,12 +2215,16 @@
 			return rc;
 
 		if (wled->en_ext_pfet_sc_pro) {
-			reg = QPNP_WLED_EXT_FET_DTEST2;
-			rc = qpnp_wled_sec_write_reg(wled,
+			if (!(wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE
+				&& wled->pmic_rev_id->rev4 ==
+					PMI8998_V2P0_REV4)) {
+				reg = QPNP_WLED_EXT_FET_DTEST2;
+				rc = qpnp_wled_sec_write_reg(wled,
 					QPNP_WLED_TEST1_REG(wled->ctrl_base),
 					reg);
-			if (rc)
-				return rc;
+				if (rc)
+					return rc;
+			}
 		}
 	} else {
 		rc = qpnp_wled_read_reg(wled,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 7755271..25852e3 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -154,8 +154,8 @@
 		DBDMA_DO_STOP(rm->dma_regs);
 		return;
 	}
-	memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
-	memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
+	memset(rdma->buf1, 0, sizeof(rdma->buf1));
+	memset(rdma->buf2, 0, sizeof(rdma->buf2));
 
 	rm->dma_buf_v->mark = 0;
 
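The rack-meter fix above is the classic sizeof vs ARRAY_SIZE confusion: memset() wants a byte count, ARRAY_SIZE() yields an element count, and the two only coincide for byte-sized elements. A short illustration (buffer name and type are made up):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/string.h>
#include <linux/types.h>

static void clear_example(void)
{
	u32 buf[64];

	memset(buf, 0, ARRAY_SIZE(buf));	/* wrong: clears only 64 bytes (16 elements) */
	memset(buf, 0, sizeof(buf));		/* right: clears all 256 bytes */
}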
diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c
index 00ebed9..c81fe52 100644
--- a/drivers/mailbox/qcom-rpmh-mailbox.c
+++ b/drivers/mailbox/qcom-rpmh-mailbox.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -129,6 +129,7 @@
 	unsigned long addr;
 	void __iomem *base; /* start address of the RSC's registers */
 	void __iomem *reg_base; /* start address for DRV specific register */
+	int irq;
 	int drv_id;
 	struct platform_device *pdev;
 	struct tcs_mbox tcs[TCS_TYPE_NR];
@@ -878,6 +879,8 @@
 {
 	int i;
 	unsigned long long curr = arch_counter_get_cntvct();
+	struct irq_data *rsc_irq_data = irq_get_irq_data(drv->irq);
+	bool irq_sts;
 
 	for (i = 0; i < drv->num_tcs; i++) {
 		if (!atomic_read(&drv->tcs_in_use[i]))
@@ -891,6 +894,20 @@
 		print_tcs_regs(drv, i);
 		print_response(drv, i);
 	}
+
+	if (rsc_irq_data) {
+		irq_get_irqchip_state(drv->irq, IRQCHIP_STATE_PENDING,
+								&irq_sts);
+		pr_warn("HW IRQ %lu is %s at GIC\n", rsc_irq_data->hwirq,
+					irq_sts ? "PENDING" : "NOT PENDING");
+	}
+
+	if (test_bit(TASKLET_STATE_SCHED, &drv->tasklet.state))
+		pr_warn("Tasklet is scheduled for execution\n");
+	else if (test_bit(TASKLET_STATE_RUN, &drv->tasklet.state))
+		pr_warn("Tasklet is running\n");
+	else
+		pr_warn("Tasklet is not active\n");
 }
 
 static void chan_debug(struct mbox_chan *chan)
@@ -980,7 +997,8 @@
 
 	/* If we were just busy waiting for TCS, dump the state and return */
 	if (ret == -EBUSY) {
-		pr_info_ratelimited("TCS Busy, retrying RPMH message send\n");
+		dev_err_ratelimited(chan->cl->dev,
+				"TCS Busy, retrying RPMH message send\n");
 		ret = -EAGAIN;
 	}
 
@@ -1261,6 +1279,8 @@
 	if (ret)
 		return ret;
 
+	drv->irq = irq;
+
 	/* Enable interrupts for AMC TCS */
 	write_tcs_reg(drv->reg_base, RSC_DRV_IRQ_ENABLE, 0, 0,
 					drv->tcs[ACTIVE_TCS].tcs_mask);
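
For reference, a minimal sketch of the GIC pending-state query that the debug hunk above adds. report_irq_pending() and my_irq are placeholder names used for illustration, not part of the driver.

#include <linux/irq.h>
#include <linux/printk.h>

/* Report whether a Linux IRQ is currently latched as pending at the irqchip. */
static void report_irq_pending(int my_irq)
{
	struct irq_data *d = irq_get_irq_data(my_irq);
	bool pending = false;

	if (!d)
		return;

	/* Ask the irqchip (the GIC here) for the PENDING state of this IRQ */
	if (!irq_get_irqchip_state(my_irq, IRQCHIP_STATE_PENDING, &pending))
		pr_warn("HW IRQ %lu is %s at GIC\n", d->hwirq,
			pending ? "PENDING" : "NOT PENDING");
}
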
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index d23337e..dd344ee 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -284,8 +284,10 @@
 			break;						\
 									\
 		mutex_unlock(&(ca)->set->bucket_lock);			\
-		if (kthread_should_stop())				\
+		if (kthread_should_stop()) {				\
+			set_current_state(TASK_RUNNING);		\
 			return 0;					\
+		}							\
 									\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 02619ca..7fe7df5 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -904,7 +904,7 @@
 
 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
 
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
+int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
 void bch_cached_dev_detach(struct cached_dev *);
 void bch_cached_dev_run(struct cached_dev *);
 void bcache_device_stop(struct bcache_device *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index cac297f..cf7c689 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1864,14 +1864,17 @@
 	 */
 	for_each_cache(ca, c, i) {
 		for_each_bucket(b, ca) {
-			if (fifo_full(&ca->free[RESERVE_PRIO]))
+			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+			    fifo_full(&ca->free[RESERVE_BTREE]))
 				break;
 
 			if (bch_can_invalidate_bucket(ca, b) &&
 			    !GC_MARK(b)) {
 				__bch_invalidate_one_bucket(ca, b);
-				fifo_push(&ca->free[RESERVE_PRIO],
-					  b - ca->buckets);
+				if (!fifo_push(&ca->free[RESERVE_PRIO],
+				   b - ca->buckets))
+					fifo_push(&ca->free[RESERVE_BTREE],
+						  b - ca->buckets);
 			}
 		}
 	}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 723302c..faa9bfe 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -633,11 +633,11 @@
 static void search_free(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
-	bio_complete(s);
 
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
+	bio_complete(s);
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e4c2d5d..11c9953 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -938,7 +938,8 @@
 	cached_dev_put(dc);
 }
 
-int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+			  uint8_t *set_uuid)
 {
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
@@ -947,7 +948,8 @@
 
 	bdevname(dc->bdev, buf);
 
-	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
+	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
+	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
 		return -ENOENT;
 
 	if (dc->disk.c) {
@@ -1191,7 +1193,7 @@
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
-		bch_cached_dev_attach(dc, c);
+		bch_cached_dev_attach(dc, c, NULL);
 
 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
 	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
@@ -1714,7 +1716,7 @@
 	bcache_write_super(c);
 
 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
-		bch_cached_dev_attach(dc, c);
+		bch_cached_dev_attach(dc, c, NULL);
 
 	flash_devs_run(c);
 
@@ -1831,6 +1833,7 @@
 static int cache_alloc(struct cache *ca)
 {
 	size_t free;
+	size_t btree_buckets;
 	struct bucket *b;
 
 	__module_get(THIS_MODULE);
@@ -1840,9 +1843,19 @@
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
+	/*
+	 * When ca->sb.njournal_buckets is not zero, a journal exists and
+	 * btree nodes may split during bch_journal_replay(), so buckets of
+	 * RESERVE_BTREE type are needed. In the worst case every journal
+	 * bucket holds valid journal data and all of its keys must be
+	 * replayed, so the number of RESERVE_BTREE buckets should match
+	 * the number of journal buckets.
+	 */
+	btree_buckets = ca->sb.njournal_buckets ?: 8;
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
 	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
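
The sizing comment in the hunk above reduces to a simple rule; a standalone sketch, where btree_reserve_buckets() is a hypothetical helper rather than bcache code:

/*
 * Worst case: every journal bucket holds valid journal data and replaying
 * all of its keys may split btree nodes, so reserve one RESERVE_BTREE
 * bucket per journal bucket; keep the old default of 8 when there is
 * no journal.
 */
static size_t btree_reserve_buckets(size_t njournal_buckets)
{
	return njournal_buckets ? njournal_buckets : 8;
}
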
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fbb553..5a5c1f1 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@
 {
 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
 					     disk.kobj);
-	ssize_t v = size;
+	ssize_t v;
 	struct cache_set *c;
 	struct kobj_uevent_env *env;
 
@@ -263,17 +263,20 @@
 	}
 
 	if (attr == &sysfs_attach) {
-		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
+		uint8_t		set_uuid[16];
+
+		if (bch_parse_uuid(buf, set_uuid) < 16)
 			return -EINVAL;
 
+		v = -ENOENT;
 		list_for_each_entry(c, &bch_cache_sets, list) {
-			v = bch_cached_dev_attach(dc, c);
+			v = bch_cached_dev_attach(dc, c, set_uuid);
 			if (!v)
 				return size;
 		}
 
 		pr_err("Can't attach %s: cache set not found", buf);
-		size = v;
+		return v;
 	}
 
 	if (attr == &sysfs_detach && dc->disk.c)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4ce2b19..bb7aa31 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -420,18 +420,27 @@
 
 	while (!kthread_should_stop()) {
 		down_write(&dc->writeback_lock);
-		if (!atomic_read(&dc->has_dirty) ||
-		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
-		     !dc->writeback_running)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		/*
+		 * If the bcache device is detaching, skip here and continue
+		 * to perform writeback. Otherwise, if there is no dirty data
+		 * on the cache, or there is dirty data but writeback is
+		 * disabled, the writeback thread should sleep here and wait
+		 * for others to wake it up.
+		 */
+		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
 			up_write(&dc->writeback_lock);
-			set_current_state(TASK_INTERRUPTIBLE);
 
-			if (kthread_should_stop())
+			if (kthread_should_stop()) {
+				set_current_state(TASK_RUNNING);
 				return 0;
+			}
 
 			schedule();
 			continue;
 		}
+		set_current_state(TASK_RUNNING);
 
 		searched_full_index = refill_dirty(dc);
 
@@ -441,6 +450,14 @@
 			cached_dev_put(dc);
 			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
 			bch_write_bdev_super(dc, NULL);
+			/*
+			 * If the bcache device is detaching via sysfs, the
+			 * writeback thread should stop once no dirty data is
+			 * left on the cache. BCACHE_DEV_DETACHING is set in
+			 * bch_cached_dev_detach().
+			 */
+			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+				break;
 		}
 
 		up_write(&dc->writeback_lock);
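
The writeback changes above follow the usual kthread sleep pattern: the task state is set to TASK_INTERRUPTIBLE before the condition is checked, so a wake-up arriving between the check and schedule() is not lost. A minimal sketch, with work_available() and do_work() standing in for the driver's own checks and work:

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Mark ourselves sleepy before testing the condition */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!work_available()) {	/* placeholder condition */
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			schedule();		/* actually sleep */
			continue;
		}

		/* There is work; go back to runnable and handle it */
		set_current_state(TASK_RUNNING);
		do_work();			/* placeholder work */
	}
	return 0;
}
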
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 3ec647e..809a4df 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -827,7 +827,8 @@
 	 * dm-bufio is resistant to allocation failures (it just keeps
 	 * one buffer reserved in cases all the allocations fail).
 	 * So set flags to not try too hard:
-	 *	GFP_NOIO: don't recurse into the I/O layer
+	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+	 *		    mutex and wait ourselves.
 	 *	__GFP_NORETRY: don't retry and rather return failure
 	 *	__GFP_NOMEMALLOC: don't use emergency reserves
 	 *	__GFP_NOWARN: don't print a warning in case of failure
@@ -837,7 +838,7 @@
 	 */
 	while (1) {
 		if (dm_bufio_cache_size_latch != 1) {
-			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 			if (b)
 				return b;
 		}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 93059dd..de0f8de 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8200,6 +8200,19 @@
 	set_mask_bits(&mddev->flags, 0,
 		      BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
 
+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+			!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+			mddev->delta_disks > 0 &&
+			mddev->pers->finish_reshape &&
+			mddev->pers->size &&
+			mddev->queue) {
+		mddev_lock_nointr(mddev);
+		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+		mddev_unlock(mddev);
+		set_capacity(mddev->gendisk, mddev->array_sectors);
+		revalidate_disk(mddev->gendisk);
+	}
+
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 		/* We completed so min/max setting can be forgotten if used. */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 208fbf7..eb145ec 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1673,6 +1673,17 @@
 			struct md_rdev *repl =
 				conf->mirrors[conf->raid_disks + number].rdev;
 			freeze_array(conf, 0);
+			if (atomic_read(&repl->nr_pending)) {
+				/* Some queued I/O on retry_list still holds a
+				 * reference to repl, so we must not set the
+				 * replacement to NULL; otherwise an rdev NULL
+				 * pointer dereference could occur in
+				 * sync_request_write() and
+				 * handle_write_finished().
+				 */
+				err = -EBUSY;
+				unfreeze_array(conf);
+				goto abort;
+			}
 			clear_bit(Replacement, &repl->flags);
 			p->rdev = repl;
 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2b04c720..9b982d4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2636,7 +2636,8 @@
 		for (m = 0; m < conf->copies; m++) {
 			int dev = r10_bio->devs[m].devnum;
 			rdev = conf->mirrors[dev].rdev;
-			if (r10_bio->devs[m].bio == NULL)
+			if (r10_bio->devs[m].bio == NULL ||
+				r10_bio->devs[m].bio->bi_end_io == NULL)
 				continue;
 			if (!r10_bio->devs[m].bio->bi_error) {
 				rdev_clear_badblocks(
@@ -2651,7 +2652,8 @@
 					md_error(conf->mddev, rdev);
 			}
 			rdev = conf->mirrors[dev].replacement;
-			if (r10_bio->devs[m].repl_bio == NULL)
+			if (r10_bio->devs[m].repl_bio == NULL ||
+				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
 				continue;
 
 			if (!r10_bio->devs[m].repl_bio->bi_error) {
@@ -4682,17 +4684,11 @@
 		return;
 
 	if (mddev->delta_disks > 0) {
-		sector_t size = raid10_size(mddev, 0, 0);
-		md_set_array_sectors(mddev, size);
 		if (mddev->recovery_cp > mddev->resync_max_sectors) {
 			mddev->recovery_cp = mddev->resync_max_sectors;
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		}
-		mddev->resync_max_sectors = size;
-		if (mddev->queue) {
-			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk(mddev->gendisk);
-		}
+		mddev->resync_max_sectors = mddev->array_sectors;
 	} else {
 		int d;
 		rcu_read_lock();
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a7549a4..8de95a5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2049,15 +2049,16 @@
 static int grow_stripes(struct r5conf *conf, int num)
 {
 	struct kmem_cache *sc;
+	size_t namelen = sizeof(conf->cache_name[0]);
 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
 	if (conf->mddev->gendisk)
-		sprintf(conf->cache_name[0],
+		snprintf(conf->cache_name[0], namelen,
 			"raid%d-%s", conf->level, mdname(conf->mddev));
 	else
-		sprintf(conf->cache_name[0],
+		snprintf(conf->cache_name[0], namelen,
 			"raid%d-%p", conf->level, conf->mddev);
-	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+	snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
 	conf->active_name = 0;
 	sc = kmem_cache_create(conf->cache_name[conf->active_name],
@@ -7614,13 +7615,7 @@
 
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
-		if (mddev->delta_disks > 0) {
-			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
-			if (mddev->queue) {
-				set_capacity(mddev->gendisk, mddev->array_sectors);
-				revalidate_disk(mddev->gendisk);
-			}
-		} else {
+		if (mddev->delta_disks <= 0) {
 			int d;
 			spin_lock_irq(&conf->device_lock);
 			mddev->degraded = calc_degraded(conf);
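
A note on the snprintf change earlier in this raid5.c diff: conf->cache_name entries are 32 bytes, so the "%.27s-alt" format caps the base name at 27 characters, leaving exactly enough room for the 4-character "-alt" suffix and the terminating NUL. A small standalone check of that arithmetic (the 32-byte size is taken from the md code; the sample name is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char base[32] = "raid6-md_with_a_very_long_name";
	char alt[32];

	/* 27 (truncated base) + 4 ("-alt") + 1 (NUL) fits exactly in 32 */
	snprintf(alt, sizeof(alt), "%.27s-alt", base);
	printf("%s -> %zu chars\n", alt, strlen(alt));
	return 0;
}
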
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 99ba8d6..427ece1 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -2282,6 +2282,10 @@
 				&dev->i2c_bus[2].i2c_adap,
 				"cx25840", 0x88 >> 1, NULL);
 		if (dev->sd_cx25840) {
+			/* set host data for clk_freq configuration */
+			v4l2_set_subdev_hostdata(dev->sd_cx25840,
+						&dev->clk_freq);
+
 			dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
 			v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
 		}
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c86b109..dcbb3a2 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -872,6 +872,16 @@
 	if (cx23885_boards[dev->board].clk_freq > 0)
 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
 
+	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
+		dev->pci->subsystem_device == 0x7137) {
+		/* The Hauppauge ImpactVCBe with device ID 0x7137 is populated
+		 * with an 888 and a 25MHz crystal, instead of the usual
+		 * third-overtone 50MHz. The default clock rate must be
+		 * overridden so the cx25840 is properly configured.
+		 */
+		dev->clk_freq = 25000000;
+	}
+
 	dev->pci_bus  = dev->pci->bus->number;
 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
 	cx23885_irq_add(dev, 0x001f00);
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 9a5f912..0d4cacb 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -871,6 +871,10 @@
 	dev->nr = ++cx25821_devcount;
 	sprintf(dev->name, "cx25821[%d]", dev->nr);
 
+	if (dev->nr >= ARRAY_SIZE(card)) {
+		CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
+		return -ENODEV;
+	}
 	if (dev->pci->device != 0x8210) {
 		pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
 			__func__, dev->pci->device);
@@ -886,9 +890,6 @@
 		dev->channels[i].sram_channels = &cx25821_sram_channels[i];
 	}
 
-	if (dev->nr > 1)
-		CX25821_INFO("dev->nr > 1!");
-
 	/* board config */
 	dev->board = 1;		/* card[dev->nr]; */
 	dev->_max_num_decoders = MAX_DECODERS;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 8beffc4..891b738 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -355,7 +355,7 @@
 {
 	int rc = 0;
 
-	if (!ctx->state_machine) {
+	if (!ctx || !ctx->state_machine) {
 		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
@@ -384,7 +384,7 @@
 {
 	int rc = 0;
 
-	if (!ctx->state_machine) {
+	if (!ctx || !ctx->state_machine) {
 		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 5e4ff0d..90603de 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -22,11 +22,6 @@
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_soc.h"
 
-#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
-
 static uint cam_min_camnoc_ib_bw;
 module_param(cam_min_camnoc_ib_bw, uint, 0644);
 
@@ -82,8 +77,8 @@
 	if (level == bus_client->curr_vote_level)
 		return 0;
 
-	CAM_DBG(CAM_CPAS, "Bus client[%d] index[%d]", bus_client->client_id,
-		level);
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] index[%d]",
+		bus_client->client_id, bus_client->name, level);
 	msm_bus_scale_client_update_request(bus_client->client_id, level);
 	bus_client->curr_vote_level = level;
 
@@ -152,8 +147,8 @@
 	path->vectors[0].ab = ab;
 	path->vectors[0].ib = ib;
 
-	CAM_DBG(CAM_CPAS, "Bus client[%d] :ab[%llu] ib[%llu], index[%d]",
-		bus_client->client_id, ab, ib, idx);
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+		bus_client->client_id, bus_client->name, ab, ib, idx);
 	msm_bus_scale_client_update_request(bus_client->client_id, idx);
 
 	return 0;
@@ -208,10 +203,12 @@
 	bus_client->num_paths = pdata->usecase[0].num_paths;
 	bus_client->curr_vote_level = 0;
 	bus_client->valid = true;
+	bus_client->name = pdata->name;
 	mutex_init(&bus_client->lock);
 
-	CAM_DBG(CAM_CPAS, "Bus Client : src=%d, dst=%d, bus_client=%d",
-		bus_client->src, bus_client->dst, bus_client->client_id);
+	CAM_DBG(CAM_CPAS, "Bus Client=[%d][%s] : src=%d, dst=%d",
+		bus_client->client_id, bus_client->name,
+		bus_client->src, bus_client->dst);
 
 	return 0;
 fail_unregister_client:
@@ -463,6 +460,7 @@
 {
 	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	int reg_base_index = cpas_core->regbase_index[reg_base];
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
@@ -478,9 +476,12 @@
 		return -EINVAL;
 
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
@@ -503,6 +504,7 @@
 {
 	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	int reg_base_index = cpas_core->regbase_index[reg_base];
 	uint32_t reg_value;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
@@ -522,9 +524,12 @@
 		return -EINVAL;
 
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
@@ -580,8 +585,9 @@
 			soc_private->camnoc_axi_clk_bw_margin) / 100;
 
 		if ((required_camnoc_bw > 0) &&
-			(required_camnoc_bw < CAM_CPAS_AXI_MIN_CAMNOC_IB_BW))
-			required_camnoc_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+			(required_camnoc_bw <
+			soc_private->camnoc_axi_min_ib_bw))
+			required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
 
 		clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
 
@@ -695,6 +701,7 @@
 {
 	struct cam_axi_vote axi_vote;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -719,16 +726,20 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
 
 	CAM_DBG(CAM_CPAS,
-		"Client[%d] Requested compressed[%llu], uncompressed[%llu]",
-		client_indx, axi_vote.compressed_bw,
+		"Client=[%d][%s][%d] Requested compressed[%llu], uncompressed[%llu]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, axi_vote.compressed_bw,
 		axi_vote.uncompressed_bw);
 
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
@@ -809,7 +820,8 @@
 	mutex_lock(&ahb_bus_client->lock);
 	cpas_client->ahb_level = required_level;
 
-	CAM_DBG(CAM_CPAS, "Clients required level[%d], curr_level[%d]",
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s] required level[%d], curr_level[%d]",
+		ahb_bus_client->client_id, ahb_bus_client->name,
 		required_level, ahb_bus_client->curr_vote_level);
 
 	if (required_level == ahb_bus_client->curr_vote_level)
@@ -853,6 +865,7 @@
 {
 	struct cam_ahb_vote ahb_vote;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -875,17 +888,21 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto unlock_client;
 	}
 
 	CAM_DBG(CAM_CPAS,
-		"client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
-		client_indx, ahb_vote.type, ahb_vote.vote.level,
-		ahb_vote.vote.freq,
+		"client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, ahb_vote.type,
+		ahb_vote.vote.level, ahb_vote.vote.freq,
 		cpas_core->cpas_client[client_indx]->ahb_level);
 
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
@@ -948,32 +965,37 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client is not registered %d", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d] is not registered",
+			client_indx);
 		rc = -EPERM;
 		goto done;
 	}
 
 	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "Client %d is in start state", client_indx);
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
 
-	cpas_client = cpas_core->cpas_client[client_indx];
-
-	CAM_DBG(CAM_CPAS, "AHB :client[%d] type[%d], level[%d], applied[%d]",
-		client_indx, ahb_vote->type, ahb_vote->vote.level,
-		cpas_client->ahb_level);
+	CAM_DBG(CAM_CPAS,
+		"AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index,
+		ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level);
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
 		ahb_vote, &applied_level);
 	if (rc)
 		goto done;
 
 	CAM_DBG(CAM_CPAS,
-		"AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]",
-		client_indx, axi_vote->compressed_bw,
+		"AXI client=[%d][%s][%d] compressed_bw[%llu], uncompressed_bw[%llu]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, axi_vote->compressed_bw,
 		axi_vote->uncompressed_bw);
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
 		cpas_client, axi_vote);
@@ -1010,9 +1032,9 @@
 	cpas_client->started = true;
 	cpas_core->streamon_clients++;
 
-	CAM_DBG(CAM_CPAS, "client=%s, streamon_clients=%d",
-		soc_private->client_name[client_indx],
-		cpas_core->streamon_clients);
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
 done:
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
 	mutex_unlock(&cpas_hw->hw_mutex);
@@ -1062,18 +1084,20 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
-	CAM_DBG(CAM_CPAS, "client=%s, streamon_clients=%d",
-		soc_private->client_name[client_indx],
-		cpas_core->streamon_clients);
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "Client %d is not started", client_indx);
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
 
-	cpas_client = cpas_core->cpas_client[client_indx];
 	cpas_client->started = false;
 	cpas_core->streamon_clients--;
 
@@ -1207,8 +1231,9 @@
 		cpas_client, client_indx);
 	if (rc) {
 		CAM_ERR(CAM_CPAS,
-			"axi_port_insert failed client_indx=%d, rc=%d",
-			client_indx, rc);
+			"axi_port_insert failed Client=[%d][%s][%d], rc=%d",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index, rc);
 		kfree(cpas_client);
 		mutex_unlock(&cpas_hw->hw_mutex);
 		return -EINVAL;
@@ -1223,8 +1248,9 @@
 
 	mutex_unlock(&cpas_hw->hw_mutex);
 
-	CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
-		client_indx, cpas_core->registered_clients);
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->registered_clients);
 
 	return 0;
 }
@@ -1233,6 +1259,7 @@
 	uint32_t client_handle)
 {
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -1241,15 +1268,20 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "client not registered %d", client_indx);
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] not registered",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
 
 	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		CAM_ERR(CAM_CPAS, "Client %d is not stopped", client_indx);
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
 		rc = -EPERM;
 		goto done;
 	}
@@ -1257,8 +1289,9 @@
 	cam_cpas_util_remove_client_from_axi_port(
 		cpas_core->cpas_client[client_indx]);
 
-	CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
-		client_indx, cpas_core->registered_clients);
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->registered_clients);
 
 	kfree(cpas_core->cpas_client[client_indx]);
 	cpas_core->cpas_client[client_indx] = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index 2e660b1..d51b152 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -20,6 +20,11 @@
 #define CAM_CPAS_MAX_CLIENTS 30
 #define CAM_CPAS_INFLIGHT_WORKS 5
 
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
+
 #define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
 #define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
 
@@ -118,6 +123,7 @@
  * @dyn_vote: Whether dynamic voting enabled
  * @lock: Mutex lock used while voting on this client
  * @valid: Whether bus client is valid
+ * @name: Name of the bus client
  *
  */
 struct cam_cpas_bus_client {
@@ -131,6 +137,7 @@
 	bool dyn_vote;
 	struct mutex lock;
 	bool valid;
+	const char *name;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 8f9ec14..b73b32a 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -56,6 +56,27 @@
 
 	CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_private->hw_version);
 
+	soc_private->camnoc_axi_min_ib_bw = 0;
+	rc = of_property_read_u64(of_node,
+		"camnoc-axi-min-ib-bw",
+		&soc_private->camnoc_axi_min_ib_bw);
+	if (rc == -EOVERFLOW) {
+		soc_private->camnoc_axi_min_ib_bw = 0;
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-min-ib-bw",
+			(u32 *)&soc_private->camnoc_axi_min_ib_bw);
+	}
+
+	if (rc) {
+		CAM_DBG(CAM_CPAS,
+			"failed to read camnoc-axi-min-ib-bw rc:%d", rc);
+		soc_private->camnoc_axi_min_ib_bw =
+			CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+	}
+
+	CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
+		soc_private->camnoc_axi_min_ib_bw);
+
 	soc_private->client_id_based = of_property_read_bool(of_node,
 		"client-id-based");
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index 91e8d0c0..f6ae8a8 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -48,6 +48,7 @@
  * @camnoc_bus_width : CAMNOC Bus width
  * @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
  *      camnoc axi clock
+ * @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
  *
  */
 struct cam_cpas_private_soc {
@@ -66,6 +67,7 @@
 	bool control_camnoc_axi_clk;
 	uint32_t camnoc_bus_width;
 	uint32_t camnoc_axi_clk_bw_margin;
+	uint64_t camnoc_axi_min_ib_bw;
 };
 
 int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index f9985eb..7b02aac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -504,6 +504,18 @@
 
 }
 
+static int __cam_isp_ctx_reg_upd_in_epoch_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	if (ctx_isp->frame_id == 1)
+		CAM_DBG(CAM_ISP, "Reg update for early PCR");
+	else
+		CAM_WARN(CAM_ISP,
+			"Unexpected reg update in activated substate:%d for frame_id:%lld",
+			ctx_isp->substate_activated, ctx_isp->frame_id);
+	return 0;
+}
+
 static int __cam_isp_ctx_reg_upd_in_activated_state(
 	struct cam_isp_context *ctx_isp, void *evt_data)
 {
@@ -1119,7 +1131,7 @@
 		.irq_ops = {
 			__cam_isp_ctx_handle_error,
 			__cam_isp_ctx_sof_in_epoch,
-			NULL,
+			__cam_isp_ctx_reg_upd_in_epoch_state,
 			__cam_isp_ctx_notify_sof_in_actived_state,
 			__cam_isp_ctx_notify_eof_in_actived_state,
 			__cam_isp_ctx_buf_done_in_epoch,
@@ -1325,9 +1337,7 @@
 	struct list_head                  flush_list;
 
 	INIT_LIST_HEAD(&flush_list);
-	spin_lock_bh(&ctx->lock);
 	if (list_empty(req_list)) {
-		spin_unlock_bh(&ctx->lock);
 		CAM_DBG(CAM_ISP, "request list is empty");
 		return 0;
 	}
@@ -1346,7 +1356,6 @@
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &flush_list);
 	}
-	spin_unlock_bh(&ctx->lock);
 
 	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
@@ -1364,16 +1373,16 @@
 				req_isp->fence_map_out[i].sync_id = -1;
 			}
 		}
-		spin_lock_bh(&ctx->lock);
 		list_add_tail(&req->list, &ctx->free_req_list);
-		spin_unlock_bh(&ctx->lock);
 	}
 
 	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
-		!cancel_req_id_found)
-		CAM_DBG(CAM_ISP,
+		!cancel_req_id_found) {
+		CAM_INFO(CAM_ISP,
 			"Flush request id:%lld is not found in the list",
 			flush_req->req_id);
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1385,7 +1394,9 @@
 	int rc = 0;
 
 	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	spin_unlock_bh(&ctx->lock);
 	CAM_DBG(CAM_ISP, "Flush request in top state %d",
 		 ctx->state);
 	return rc;
@@ -1399,13 +1410,17 @@
 	struct cam_isp_context *ctx_isp;
 
 	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
-	spin_lock_bh(&ctx->lock);
-	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
-	ctx_isp->frame_skip_count = 2;
-	spin_unlock_bh(&ctx->lock);
 
 	CAM_DBG(CAM_ISP, "Flush request in state %d", ctx->state);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+	/* Move to flush state only if the request is found in the pending queue */
+	if (!rc) {
+		spin_lock_bh(&ctx->lock);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
+		ctx_isp->frame_skip_count = 2;
+		spin_unlock_bh(&ctx->lock);
+	}
 	return rc;
 }
 
@@ -1416,10 +1431,10 @@
 	int rc = 0;
 
 	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
 
 	/* if nothing is in pending req list, change state to acquire*/
-	spin_lock_bh(&ctx->lock);
 	if (list_empty(&ctx->pending_req_list))
 		ctx->state = CAM_CTX_ACQUIRED;
 	spin_unlock_bh(&ctx->lock);
@@ -1995,8 +2010,9 @@
 	flush_req.dev_hdl = ctx->dev_hdl;
 
 	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
-
+	spin_unlock_bh(&ctx->lock);
 	ctx->state = CAM_CTX_AVAILABLE;
 
 	trace_cam_context_state("ISP", ctx);
@@ -2381,7 +2397,9 @@
 	ctx_isp->active_req_cnt = 0;
 	ctx_isp->reported_req_id = 0;
 	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
-		CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;
+		CAM_ISP_CTX_ACTIVATED_APPLIED :
+		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
+		CAM_ISP_CTX_ACTIVATED_SOF;
 
 	/*
 	 * Only place to change state before calling the hw due to
@@ -2399,6 +2417,11 @@
 		goto end;
 	}
 	CAM_DBG(CAM_ISP, "start device success");
+
+	if (req_isp->num_fence_map_out) {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->active_req_list);
+	}
 end:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 38a4497..c1aa501 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -210,7 +210,8 @@
 	int rc = -1;
 	struct cam_hw_intf      *hw_intf;
 
-	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+	/* Start the slave (which is the right split) first */
+	for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
 		if (!isp_hw_res->hw_res[i])
 			continue;
 		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
@@ -959,6 +960,7 @@
 {
 	int rc = -1;
 	int i;
+	int master_idx = -1;
 
 	struct cam_ife_hw_mgr               *ife_hw_mgr;
 	struct cam_ife_hw_mgr_res           *csid_res;
@@ -1010,18 +1012,27 @@
 			if (!cid_res->hw_res[i])
 				continue;
 
-			csid_acquire.node_res = NULL;
-			if (csid_res->is_dual_vfe) {
-				if (i == CAM_ISP_HW_SPLIT_LEFT)
-					csid_acquire.sync_mode =
-						CAM_ISP_HW_SYNC_MASTER;
-				else
-					csid_acquire.sync_mode =
-						CAM_ISP_HW_SYNC_SLAVE;
-			}
-
 			hw_intf = ife_hw_mgr->csid_devices[
 				cid_res->hw_res[i]->hw_intf->hw_idx];
+
+			csid_acquire.node_res = NULL;
+			if (csid_res->is_dual_vfe) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					master_idx = hw_intf->hw_idx;
+					csid_acquire.sync_mode =
+						CAM_ISP_HW_SYNC_MASTER;
+				} else {
+					if (master_idx == -1) {
+						CAM_ERR(CAM_ISP,
+							"No Master found");
+						goto err;
+					}
+					csid_acquire.sync_mode =
+						CAM_ISP_HW_SYNC_SLAVE;
+					csid_acquire.master_idx = master_idx;
+				}
+			}
+
 			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
 				&csid_acquire, sizeof(csid_acquire));
 			if (rc) {
@@ -1356,6 +1367,7 @@
 	uint32_t                           num_rdi_port_per_in = 0;
 	uint32_t                           total_pix_port = 0;
 	uint32_t                           total_rdi_port = 0;
+	uint32_t                           in_port_length = 0;
 
 	CAM_DBG(CAM_ISP, "Enter...");
 
@@ -1416,9 +1428,27 @@
 			isp_resource[i].res_hdl,
 			isp_resource[i].length);
 
+		in_port_length = sizeof(struct cam_isp_in_port_info);
+
+		if (in_port_length > isp_resource[i].length) {
+			CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+
 		in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
 			isp_resource[i].length);
 		if (!IS_ERR(in_port)) {
+			in_port_length = sizeof(struct cam_isp_in_port_info) +
+				(in_port->num_out_res - 1) *
+				sizeof(struct cam_isp_out_port_info);
+			if (in_port_length > isp_resource[i].length) {
+				CAM_ERR(CAM_ISP, "buffer size is not enough");
+				rc = -EINVAL;
+				kfree(in_port);
+				goto free_res;
+			}
+
 			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
 				&num_pix_port_per_in, &num_rdi_port_per_in);
 			total_pix_port += num_pix_port_per_in;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index d20450c..3edae4a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -851,7 +851,6 @@
 	path_data->cid = reserve->cid;
 	path_data->in_format = reserve->in_port->format;
 	path_data->out_format = reserve->out_port->format;
-	path_data->master_idx = reserve->master_idx;
 	path_data->sync_mode = reserve->sync_mode;
 	path_data->height  = reserve->in_port->height;
 	path_data->start_line = reserve->in_port->line_start;
@@ -877,9 +876,11 @@
 		goto end;
 	}
 
-	CAM_DBG(CAM_ISP, "Res id: %d height:%d line_start %d line_stop %d",
+	CAM_DBG(CAM_ISP,
+		"Res id: %d height:%d line_start %d line_stop %d crop_en %d",
 		reserve->res_id, reserve->in_port->height,
-		reserve->in_port->line_start, reserve->in_port->line_stop);
+		reserve->in_port->line_start, reserve->in_port->line_stop,
+		path_data->crop_enable);
 
 	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
 		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
@@ -893,20 +894,23 @@
 		path_data->start_pixel = reserve->in_port->left_start;
 		path_data->end_pixel = reserve->in_port->left_stop;
 		path_data->width  = reserve->in_port->left_width;
-		CAM_DBG(CAM_ISP, "CSID:%dmaster:startpixel 0x%x endpixel:0x%x",
+		CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
 			path_data->end_pixel);
-		CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+		CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_line,
 			path_data->end_line);
 	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->master_idx = reserve->master_idx;
+		CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d",
+			csid_hw->hw_intf->hw_idx, path_data->master_idx);
 		path_data->start_pixel = reserve->in_port->right_start;
 		path_data->end_pixel = reserve->in_port->right_stop;
 		path_data->width  = reserve->in_port->right_width;
 		CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
 			path_data->end_pixel, path_data->width);
-		CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+		CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x",
 			csid_hw->hw_intf->hw_idx, path_data->start_line,
 			path_data->end_line);
 	} else {
@@ -1431,19 +1435,6 @@
 	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
 
-	/*Set master or slave IPP */
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
-		/*Set halt mode as master */
-		val = CSID_HALT_MODE_MASTER << 2;
-	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
-		/*Set halt mode as slave and set master idx */
-		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
-	else
-		/* Default is internal halt mode */
-		val = 0;
-
-	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
 
 	/* Enable the IPP path */
 	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -1545,19 +1536,31 @@
 
 	CAM_DBG(CAM_ISP, "Enable IPP path");
 
-	/* Resume at frame boundary */
-	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
-		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* Set master or slave IPP */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/*Set halt mode as master */
+		val = CSID_HALT_MODE_MASTER << 2;
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/*Set halt mode as slave and set master idx */
+		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
+	else
+		/* Default is internal halt mode */
+		val = 0;
+
+	/*
+	 * Resume at frame boundary if Master or No Sync.
+	 * Slave will get resume command from Master.
+	 */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
 		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
-		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
-	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
-		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
-			soc_info->reg_map[0].mem_base +
-			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
-	}
-	/* for slave mode, not need to resume for slave device */
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
 
 	/* Enable the required ipp interrupts */
 	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
@@ -1569,6 +1572,7 @@
 
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	CAM_DBG(CAM_ISP, "Enable IPP IRQ mask 0x%x", val);
 
 	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
@@ -2618,38 +2622,46 @@
 
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_EOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_EOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 	}
 
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
-			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_SOT_CAPTURED",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_SOT_CAPTURED",
 				csid_hw->hw_intf->hw_idx);
 		}
 	}
@@ -2709,16 +2721,17 @@
 
 		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID:%d IPP SOF received",
+			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
 				csid_hw->hw_intf->hw_idx);
 
 		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID:%d IPP EOF received",
+			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
 				csid_hw->hw_intf->hw_idx);
 
 		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
-			CAM_ERR(CAM_ISP, "CSID:%d IPP fifo over flow",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d IPP fifo over flow",
 				csid_hw->hw_intf->hw_idx);
 			/*Stop IPP path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2736,14 +2749,17 @@
 
 		if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID RDI:%d SOF received", i);
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d SOF received", i);
 
 		if ((irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_EOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
-			CAM_ERR(CAM_ISP, "CSID RDI:%d EOF received", i);
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d EOF received", i);
 
 		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
-			CAM_ERR(CAM_ISP, "CSID:%d RDI fifo over flow",
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d RDI fifo over flow",
 				csid_hw->hw_intf->hw_idx);
 			/*Stop RDI path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index a4ba2e1..984adf7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 734cbdb..8bc9bd2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -138,6 +138,9 @@
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
 
+	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
+		camif_res->hw_intf->hw_idx,
+		camif_data->pix_pattern, camif_data->dsp_mode);
 	return rc;
 }
 
@@ -201,6 +204,9 @@
 {
 	struct cam_vfe_mux_camif_data       *rsrc_data;
 	uint32_t                             val = 0;
+	uint32_t                             epoch0_irq_mask;
+	uint32_t                             epoch1_irq_mask;
+	uint32_t                             computed_epoch_line_cfg;
 
 	if (!camif_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -240,15 +246,24 @@
 		rsrc_data->common_reg->module_ctrl[
 		CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd);
 
-	/* epoch config with 20 line */
-	cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+	/* epoch config */
+	epoch0_irq_mask = ((rsrc_data->last_line - rsrc_data->first_line) / 2) +
+		rsrc_data->first_line;
+	epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & 0xFFFF;
+	computed_epoch_line_cfg = (epoch0_irq_mask << 16) | epoch1_irq_mask;
+	cam_io_w_mb(computed_epoch_line_cfg,
 		rsrc_data->mem_base + rsrc_data->camif_reg->epoch_irq);
+	CAM_DBG(CAM_ISP, "first_line:%u last_line:%u epoch_line_cfg: 0x%x",
+		rsrc_data->first_line, rsrc_data->last_line,
+		computed_epoch_line_cfg);
 
 	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	/* Reg Update */
 	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
 		rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+	CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
+		rsrc_data->reg_data->reg_update_cmd_data);
 
 	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
 	return 0;
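
The epoch programming above packs two line counts into one register: EPOCH0 is placed at mid-frame and EPOCH1 keeps the lower half of the default epoch_line_cfg. A small sketch of that computation, with a worked example in the trailing comment (the frame dimensions and default value are made up):

/* Returns the value written to the CAMIF epoch_irq register. */
static uint32_t camif_epoch_cfg(uint32_t first_line, uint32_t last_line,
				uint32_t default_epoch_line_cfg)
{
	uint32_t epoch0 = ((last_line - first_line) / 2) + first_line;
	uint32_t epoch1 = default_epoch_line_cfg & 0xFFFF;

	return (epoch0 << 16) | epoch1;
}

/*
 * Example: first_line = 0, last_line = 1079, default 0x00140014
 * -> epoch0 = 539 (0x21b), epoch1 = 20 (0x14), register = 0x021b0014
 */
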
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 5d6045b..be0ca18 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -34,11 +34,12 @@
 	struct cam_vfe_top_ver2_common_data common_data;
 	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                       hw_clk_rate;
-	struct cam_axi_vote                 to_be_applied_axi_vote;
 	struct cam_axi_vote                 applied_axi_vote;
-	uint32_t                            counter_to_update_axi_vote;
 	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
+	struct cam_axi_vote             last_vote[CAM_VFE_TOP_VER2_MUX_MAX *
+					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+	uint32_t                        last_counter;
 	enum cam_vfe_bw_control_action
 		axi_vote_control[CAM_VFE_TOP_VER2_MUX_MAX];
 };
@@ -128,6 +129,7 @@
 	bool start_stop)
 {
 	struct cam_axi_vote sum = {0, 0};
+	struct cam_axi_vote to_be_applied_axi_vote = {0, 0};
 	int i, rc = 0;
 	struct cam_hw_soc_info   *soc_info =
 		top_priv->common_data.soc_info;
@@ -156,6 +158,11 @@
 		sum.uncompressed_bw,
 		sum.compressed_bw);
 
+	top_priv->last_vote[top_priv->last_counter] = sum;
+	top_priv->last_counter = (top_priv->last_counter + 1) %
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+
 	if ((top_priv->applied_axi_vote.uncompressed_bw ==
 		sum.uncompressed_bw) &&
 		(top_priv->applied_axi_vote.compressed_bw ==
@@ -163,77 +170,60 @@
 		CAM_DBG(CAM_ISP, "BW config unchanged %llu %llu",
 			top_priv->applied_axi_vote.uncompressed_bw,
 			top_priv->applied_axi_vote.compressed_bw);
-		top_priv->counter_to_update_axi_vote = 0;
 		return 0;
 	}
 
-	if ((top_priv->to_be_applied_axi_vote.uncompressed_bw !=
-		sum.uncompressed_bw) ||
-		(top_priv->to_be_applied_axi_vote.compressed_bw !=
-		sum.compressed_bw)) {
-		// we got a new bw value to apply
-		top_priv->counter_to_update_axi_vote = 0;
-
-		top_priv->to_be_applied_axi_vote.uncompressed_bw =
-			sum.uncompressed_bw;
-		top_priv->to_be_applied_axi_vote.compressed_bw =
-			sum.compressed_bw;
-	}
-
 	if (start_stop == true) {
-		CAM_DBG(CAM_ISP,
-			"New bw in start/stop, applying bw now, counter=%d",
-			top_priv->counter_to_update_axi_vote);
-		top_priv->counter_to_update_axi_vote = 0;
-		apply_bw_update = true;
-	} else if ((top_priv->to_be_applied_axi_vote.uncompressed_bw <
-		top_priv->applied_axi_vote.uncompressed_bw) ||
-		(top_priv->to_be_applied_axi_vote.compressed_bw <
-		top_priv->applied_axi_vote.compressed_bw)) {
-		if (top_priv->counter_to_update_axi_vote >=
+		/* need to vote current request immediately */
+		to_be_applied_axi_vote = sum;
+		/* Reset everything, we can start afresh */
+		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
 			(CAM_VFE_TOP_VER2_MUX_MAX *
-			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES)) {
-			CAM_DBG(CAM_ISP,
-				"New bw is less, applying bw now, counter=%d",
-				top_priv->counter_to_update_axi_vote);
-			top_priv->counter_to_update_axi_vote = 0;
-			apply_bw_update = true;
-		} else {
-			CAM_DBG(CAM_ISP,
-				"New bw is less, Defer applying bw, counter=%d",
-				top_priv->counter_to_update_axi_vote);
-
-			top_priv->counter_to_update_axi_vote++;
-			apply_bw_update = false;
-		}
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+		top_priv->last_counter = 0;
+		top_priv->last_vote[top_priv->last_counter] = sum;
+		top_priv->last_counter = (top_priv->last_counter + 1) %
+			(CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
 	} else {
-		CAM_DBG(CAM_ISP,
-			"New bw is more, applying bw now, counter=%d",
-			top_priv->counter_to_update_axi_vote);
-		top_priv->counter_to_update_axi_vote = 0;
-		apply_bw_update = true;
+		/*
+		 * Find the max bw requested in the last few frames. This will
+		 * be the bw that we want to vote to CPAS now.
+		 */
+		for (i = 0; i < (CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
+			if (to_be_applied_axi_vote.compressed_bw <
+				top_priv->last_vote[i].compressed_bw)
+				to_be_applied_axi_vote.compressed_bw =
+					top_priv->last_vote[i].compressed_bw;
+
+			if (to_be_applied_axi_vote.uncompressed_bw <
+				top_priv->last_vote[i].uncompressed_bw)
+				to_be_applied_axi_vote.uncompressed_bw =
+					top_priv->last_vote[i].uncompressed_bw;
+		}
 	}
 
-	CAM_DBG(CAM_ISP,
-		"counter=%d, apply_bw_update=%d",
-		top_priv->counter_to_update_axi_vote,
-		apply_bw_update);
+	if ((to_be_applied_axi_vote.uncompressed_bw !=
+		top_priv->applied_axi_vote.uncompressed_bw) ||
+		(to_be_applied_axi_vote.compressed_bw !=
+		top_priv->applied_axi_vote.compressed_bw))
+		apply_bw_update = true;
+
+	CAM_DBG(CAM_ISP, "apply_bw_update=%d", apply_bw_update);
 
 	if (apply_bw_update == true) {
 		rc = cam_cpas_update_axi_vote(
 			soc_private->cpas_handle,
-			&top_priv->to_be_applied_axi_vote);
+			&to_be_applied_axi_vote);
 		if (!rc) {
 			top_priv->applied_axi_vote.uncompressed_bw =
-				top_priv->
-				to_be_applied_axi_vote.uncompressed_bw;
+			to_be_applied_axi_vote.uncompressed_bw;
 			top_priv->applied_axi_vote.compressed_bw =
-				top_priv->
 				to_be_applied_axi_vote.compressed_bw;
 		} else {
 			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
 		}
-		top_priv->counter_to_update_axi_vote = 0;
 	}
 
 	return rc;
@@ -706,11 +696,12 @@
 	}
 	vfe_top->top_priv = top_priv;
 	top_priv->hw_clk_rate = 0;
-	top_priv->to_be_applied_axi_vote.compressed_bw = 0;
-	top_priv->to_be_applied_axi_vote.uncompressed_bw = 0;
 	top_priv->applied_axi_vote.compressed_bw = 0;
 	top_priv->applied_axi_vote.uncompressed_bw = 0;
-	top_priv->counter_to_update_axi_vote = 0;
+	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+	top_priv->last_counter = 0;
 
 	for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
 		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
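
The vfe_top change above replaces the deferred-reduction counter with a history of recent votes: every frame's request is stored in a small ring buffer and the maximum over that window is what gets voted, so a bandwidth drop is applied only once it has persisted for a few frames. A minimal sketch of the scheme (the window length and names are illustrative, not the driver's):

#include <linux/types.h>

#define BW_WINDOW 8	/* driver uses MUX_MAX * delay-frames entries */

struct bw_hist {
	u64 vote[BW_WINDOW];
	unsigned int next;
};

/* Record the latest request and return the maximum over the window. */
static u64 bw_hist_push_and_max(struct bw_hist *h, u64 new_vote)
{
	u64 max = 0;
	unsigned int i;

	h->vote[h->next] = new_vote;
	h->next = (h->next + 1) % BW_WINDOW;

	for (i = 0; i < BW_WINDOW; i++)
		if (h->vote[i] > max)
			max = h->vote[i];

	return max;
}
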
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index f172a79..97d076a 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -770,13 +770,6 @@
 			} else {
 				CAM_ERR(CAM_JPEG, "process_cmd null ");
 			}
-			rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
-				hw_mgr->devices[dev_type][0]->hw_priv,
-				CAM_JPEG_CMD_SET_IRQ_CB,
-				&irq_cb, sizeof(irq_cb));
-			if (rc)
-				CAM_ERR(CAM_JPEG,
-					"CMD_SET_IRQ_CB failed %d", rc);
 
 			if (hw_mgr->devices[dev_type][0]->hw_ops.stop) {
 				rc = hw_mgr->devices[dev_type][0]->hw_ops.stop(
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index 898997a..a60661e 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -879,6 +879,7 @@
 	if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
 		CAM_ERR(CAM_LRME, "Error in port number in %d, out %d",
 			args->num_in_map_entries, args->num_out_map_entries);
+		rc = -EINVAL;
 		goto error;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index adfac57..4602d6c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -163,6 +163,34 @@
 }
 
 /**
+ * __cam_req_mgr_validate_inject_delay()
+ *
+ * @brief    : Check if any pd device is introducing inject delay
+ * @tbl      : cam_req_mgr_req_tbl
+ * @curr_idx : slot idx
+ *
+ * @return   : 0 for success, negative for failure
+ */
+static int __cam_req_mgr_validate_inject_delay(
+	struct cam_req_mgr_req_tbl  *tbl,
+	int32_t curr_idx)
+{
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+
+	while (tbl) {
+		slot = &tbl->slot[curr_idx];
+		if (slot->inject_delay > 0) {
+			slot->inject_delay--;
+			return -EAGAIN;
+		}
+		__cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta,
+			tbl->num_slots);
+		tbl = tbl->next;
+	}
+	return 0;
+}
+
+/**
  * __cam_req_mgr_traverse()
  *
  * @brief    : Traverse through pd tables, it will internally cover all linked
@@ -201,14 +229,17 @@
 		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
 		traverse_data->in_q->slot[curr_idx].skip_idx);
 
-	if ((slot->inject_delay > 0) &&
-		(traverse_data->self_link == true)) {
-		CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
-		apply_data[tbl->pd].req_id = -1;
-		slot->inject_delay--;
-		/* This pd table is not ready to proceed with asked idx */
-		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
-		return -EAGAIN;
+	if ((traverse_data->self_link == true) &&
+		(!traverse_data->inject_delay_chk)) {
+		rc = __cam_req_mgr_validate_inject_delay(tbl, curr_idx);
+		if (rc) {
+			CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
+			apply_data[tbl->pd].req_id = -1;
+			/* This pd tbl not ready to proceed with asked idx */
+			SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+			return -EAGAIN;
+		}
+		traverse_data->inject_delay_chk = true;
 	}
 
 	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
@@ -520,6 +551,7 @@
 	traverse_data.result = 0;
 	traverse_data.validate_only = validate_only;
 	traverse_data.self_link = self_link;
+	traverse_data.inject_delay_chk = false;
 	traverse_data.open_req_cnt = link->open_req_cnt;
 	/*
 	 *  Traverse through all pd tables, if result is success,
@@ -1447,6 +1479,8 @@
 		if (idx < 0) {
 			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
 			flush_info->req_id);
+			mutex_unlock(&link->req.lock);
+			return -EINVAL;
 		} else {
 			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
 				flush_info->req_id, idx);
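
The new __cam_req_mgr_validate_inject_delay() walks every pipeline-delay table at its own slot index before anything is applied, so a delay requested by any device postpones the whole apply. Below is a minimal userspace model of that walk, assuming a fixed slot ring and two linked pd tables; req_tbl, tbl_slot and validate_inject_delay() are simplified stand-ins for the CRM structures.

#include <stdio.h>

#define EAGAIN 11
#define NUM_SLOTS 8

struct tbl_slot {
	int inject_delay;
};

struct req_tbl {
	struct tbl_slot slot[NUM_SLOTS];
	int pd_delta;		/* index offset to the next pd table */
	struct req_tbl *next;
};

static void dec_idx(int *idx, int step, int num_slots)
{
	*idx = (*idx - step + num_slots) % num_slots;
}

static int validate_inject_delay(struct req_tbl *tbl, int curr_idx)
{
	while (tbl) {
		struct tbl_slot *slot = &tbl->slot[curr_idx];

		if (slot->inject_delay > 0) {
			slot->inject_delay--;
			return -EAGAIN;	/* retry on the next frame */
		}
		dec_idx(&curr_idx, tbl->pd_delta, NUM_SLOTS);
		tbl = tbl->next;
	}
	return 0;
}

int main(void)
{
	struct req_tbl pd0 = { .pd_delta = 1 };
	struct req_tbl pd2 = { .pd_delta = 0 };

	pd2.next = NULL;
	pd0.next = &pd2;
	pd2.slot[2].inject_delay = 1;	/* a pd-2 device wants one frame */

	printf("first pass:  %d\n", validate_inject_delay(&pd0, 3));
	printf("second pass: %d\n", validate_inject_delay(&pd0, 3));
	return 0;
}
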
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 73ffb81..025c16a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -126,15 +126,16 @@
 
 /**
  * struct cam_req_mgr_traverse
- * @idx           : slot index
- * @result        : contains which all tables were able to apply successfully
- * @tbl           : pointer of pipeline delay based request table
- * @apply_data    : pointer which various tables will update during traverse
- * @in_q          : input request queue pointer
- * @validate_only : Whether to validate only and/or update settings
- * @self_link     : To indicate whether the check is for the given link or the
- *                  other sync link
- * @open_req_cnt  : Count of open requests yet to be serviced in the kernel.
+ * @idx              : slot index
+ * @result           : contains which all tables were able to apply successfully
+ * @tbl              : pointer of pipeline delay based request table
+ * @apply_data       : pointer which various tables will update during traverse
+ * @in_q             : input request queue pointer
+ * @validate_only    : Whether to validate only and/or update settings
+ * @self_link        : To indicate whether the check is for the given link or
+ *                     the other sync link
+ * @inject_delay_chk : Whether inject delay was validated for all pd devices
+ * @open_req_cnt     : Count of open requests yet to be serviced in the kernel.
  */
 struct cam_req_mgr_traverse {
 	int32_t                       idx;
@@ -144,7 +145,8 @@
 	struct cam_req_mgr_req_queue *in_q;
 	bool                          validate_only;
 	bool                          self_link;
-	int32_t                      open_req_cnt;
+	bool                          inject_delay_chk;
+	int32_t                       open_req_cnt;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 966b573..066efd6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -59,11 +59,11 @@
 		(struct cam_req_mgr_core_workq *)task->parent;
 	unsigned long flags = 0;
 
-	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
 	task->priv = NULL;
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
@@ -127,28 +127,6 @@
 	}
 }
 
-void crm_workq_clear_q(struct cam_req_mgr_core_workq *workq)
-{
-	int32_t                 i = CRM_TASK_PRIORITY_0;
-	struct crm_workq_task  *task, *task_save;
-
-	CAM_DBG(CAM_CRM, "pending_cnt %d",
-		atomic_read(&workq->task.pending_cnt));
-
-	while (i < CRM_TASK_PRIORITY_MAX) {
-		if (!list_empty(&workq->task.process_head[i])) {
-			list_for_each_entry_safe(task, task_save,
-				&workq->task.process_head[i], entry) {
-				cam_req_mgr_workq_put_task(task);
-				CAM_WARN(CAM_CRM, "flush task %pK, %d, cnt %d",
-					task, i, atomic_read(
-					&workq->task.free_cnt));
-			}
-		}
-		i++;
-	}
-}
-
 int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 	void *priv, int32_t prio)
 {
@@ -167,10 +145,6 @@
 		rc = -EINVAL;
 		goto end;
 	}
-	if (!workq->job) {
-		rc = -EINVAL;
-		goto end;
-	}
 
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
@@ -184,16 +158,21 @@
 		? prio : CRM_TASK_PRIORITY_0;
 
 	WORKQ_ACQUIRE_LOCK(workq, flags);
+	if (!workq->job) {
+		rc = -EINVAL;
+		WORKQ_RELEASE_LOCK(workq, flags);
+		goto end;
+	}
+
 	list_add_tail(&task->entry,
 		&workq->task.process_head[task->priority]);
-	WORKQ_RELEASE_LOCK(workq, flags);
 
 	atomic_add(1, &workq->task.pending_cnt);
 	CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
 
 	queue_work(workq->job, &workq->work);
-
+	WORKQ_RELEASE_LOCK(workq, flags);
 end:
 	return rc;
 }
@@ -252,8 +231,7 @@
 			task = &crm_workq->task.pool[i];
 			task->parent = (void *)crm_workq;
 			/* Put all tasks in free pool */
-			list_add_tail(&task->entry,
-			&crm_workq->task.process_head[CRM_TASK_PRIORITY_0]);
+			INIT_LIST_HEAD(&task->entry);
 			cam_req_mgr_workq_put_task(task);
 		}
 		*workq = crm_workq;
@@ -266,13 +244,18 @@
 
 void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 {
+	unsigned long flags = 0;
+	struct workqueue_struct   *job;
 	CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
 	if (*crm_workq) {
-		crm_workq_clear_q(*crm_workq);
+		WORKQ_ACQUIRE_LOCK(*crm_workq, flags);
 		if ((*crm_workq)->job) {
-			destroy_workqueue((*crm_workq)->job);
+			job = (*crm_workq)->job;
 			(*crm_workq)->job = NULL;
-		}
+			WORKQ_RELEASE_LOCK(*crm_workq, flags);
+			destroy_workqueue(job);
+		} else
+			WORKQ_RELEASE_LOCK(*crm_workq, flags);
 		kfree((*crm_workq)->task.pool);
 		kfree(*crm_workq);
 		*crm_workq = NULL;
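
The workq changes above move the job-handle check under the queue lock and make destroy clear the handle under the same lock before tearing the workqueue down. The sketch below models that ordering in userspace with a pthread mutex standing in for the driver's spinlock; workq, enqueue() and destroy() here are illustrative names, not the CRM API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workq {
	pthread_mutex_t lock;
	void *job;		/* stand-in for the workqueue_struct pointer */
};

static int enqueue(struct workq *wq)
{
	int rc = 0;

	pthread_mutex_lock(&wq->lock);
	if (!wq->job) {			/* queue already torn down */
		rc = -EINVAL;
		pthread_mutex_unlock(&wq->lock);
		return rc;
	}
	/* ... add the task to the list and kick the worker here ... */
	printf("task queued on %p\n", wq->job);
	pthread_mutex_unlock(&wq->lock);
	return rc;
}

static void destroy(struct workq *wq)
{
	void *job;

	pthread_mutex_lock(&wq->lock);
	job = wq->job;
	wq->job = NULL;			/* publish "gone" under the lock */
	pthread_mutex_unlock(&wq->lock);

	if (job)
		free(job);		/* models destroy_workqueue(job) */
}

int main(void)
{
	struct workq wq;

	pthread_mutex_init(&wq.lock, NULL);
	wq.job = malloc(16);

	enqueue(&wq);			/* succeeds */
	destroy(&wq);
	printf("enqueue after destroy: %d\n", enqueue(&wq)); /* -EINVAL */
	pthread_mutex_destroy(&wq.lock);
	return 0;
}
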
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 2f74765..a34d70c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -603,7 +603,11 @@
 
 void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
 {
-	int rc;
+	int rc = 0;
+	struct cam_actuator_soc_private  *soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&soc_private->power_info;
 
 	if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
 		return;
@@ -612,6 +616,7 @@
 		rc = cam_actuator_power_down(a_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
 	}
 
 	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_ACQUIRE) {
@@ -622,6 +627,12 @@
 		a_ctrl->bridge_intf.link_hdl = -1;
 		a_ctrl->bridge_intf.session_hdl = -1;
 	}
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+
 	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index fe69fcb..b47f4f3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -205,8 +205,6 @@
 	CAM_INFO(CAM_CCI, "****CCI MASTER %d Registers ****",
 		master);
 	for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
-		if (i == 6)
-			continue;
 		reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
 		read_val = cam_io_r_mb(base + reg_offset);
 		CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
@@ -868,6 +866,180 @@
 	return rc;
 }
 
+static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0, i = 0;
+	unsigned long rem_jiffies;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0, total_read_words = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device                  *cci_dev = NULL;
+	struct cam_cci_read_cfg            *read_cfg = NULL;
+	struct cam_hw_soc_info             *soc_info = NULL;
+	void __iomem                       *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * TODO: If there is a change in the frequency of operation,
+	 * wait for the previous transaction to complete.
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validation failed rc %d", rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF)  <<
+		((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+
+	while (exp_words != total_read_words) {
+		rem_jiffies = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].reset_complete,
+			CCI_TIMEOUT);
+		if (!rem_jiffies) {
+			rc = -ETIMEDOUT;
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+				master * 0x100);
+			CAM_ERR(CAM_CCI,
+				"wait_for_completion_timeout rc = %d FIFO buf_lvl:0x%x",
+				rc, val);
+#ifdef DUMP_CCI_REGISTERS
+			cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+			cam_cci_flush_queue(cci_dev, master);
+			goto rel_mutex;
+		}
+
+		read_words = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		total_read_words += read_words;
+		do {
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+			for (i = 0; (i < 4) &&
+				(index < read_cfg->num_byte); i++) {
+				CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+				if (!first_byte) {
+					CAM_DBG(CAM_CCI, "sid 0x%x",
+						val & 0xFF);
+					first_byte++;
+				} else {
+					read_cfg->data[index] =
+						(val  >> (i * 8)) & 0xFF;
+					CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+						read_cfg->data[index]);
+					index++;
+				}
+			}
+		} while (--read_words > 0);
+	}
+
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+	return rc;
+}
+
 static int32_t cam_cci_read(struct v4l2_subdev *sd,
 	struct cam_cci_ctrl *c_ctrl)
 {
@@ -1004,7 +1176,11 @@
 #endif
 		if (rc == 0)
 			rc = -ETIMEDOUT;
-		CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc = %d", rc);
+		val = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		CAM_ERR(CAM_CCI,
+			"wait_for_completion_timeout rc = %d FIFO buf_lvl: 0x%x",
+			rc, val);
 		cam_cci_flush_queue(cci_dev, master);
 		goto rel_mutex;
 	} else {
@@ -1224,19 +1400,26 @@
 
 	read_bytes = read_cfg->num_byte;
 	do {
-		if (read_bytes > CCI_READ_MAX)
-			read_cfg->num_byte = CCI_READ_MAX;
+		if (read_bytes > CCI_I2C_MAX_BYTE_COUNT)
+			read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT;
 		else
 			read_cfg->num_byte = read_bytes;
-		rc = cam_cci_read(sd, c_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+
+		if (read_cfg->num_byte > CCI_READ_MAX)
+			rc = cam_cci_burst_read(sd, c_ctrl);
+		else
+			rc = cam_cci_read(sd, c_ctrl);
+
+		if (rc) {
+			CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
 			goto ERROR;
 		}
-		if (read_bytes > CCI_READ_MAX) {
-			read_cfg->addr += CCI_READ_MAX;
-			read_cfg->data += CCI_READ_MAX;
-			read_bytes -= CCI_READ_MAX;
+
+		if (read_bytes > CCI_I2C_MAX_BYTE_COUNT) {
+			read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT /
+				read_cfg->data_type);
+			read_cfg->data += CCI_I2C_MAX_BYTE_COUNT;
+			read_bytes -= CCI_I2C_MAX_BYTE_COUNT;
 		} else {
 			read_bytes = 0;
 		}
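
With burst read in place, the read dispatch loop in the hunk above consumes a request in chunks of at most CCI_I2C_MAX_BYTE_COUNT bytes and only takes the FIFO-sized single-read path for small chunks. The standalone sketch below mirrors that dispatch; the constants follow the header change later in this patch, and do_burst_read()/do_single_read() are hypothetical stubs for cam_cci_burst_read()/cam_cci_read().

#include <stdint.h>
#include <stdio.h>

#define CCI_READ_MAX           256	/* per-transaction single read limit */
#define CCI_I2C_MAX_BYTE_COUNT 65535	/* per-chunk burst limit */

static int do_burst_read(uint32_t addr, uint32_t num_byte)
{
	printf("burst  read: addr 0x%x, %u bytes\n", addr, num_byte);
	return 0;
}

static int do_single_read(uint32_t addr, uint32_t num_byte)
{
	printf("single read: addr 0x%x, %u bytes\n", addr, num_byte);
	return 0;
}

static int queue_read(uint32_t addr, uint32_t total_bytes,
	uint32_t data_type)
{
	int rc;

	while (total_bytes) {
		uint32_t chunk = total_bytes > CCI_I2C_MAX_BYTE_COUNT ?
			CCI_I2C_MAX_BYTE_COUNT : total_bytes;

		if (chunk > CCI_READ_MAX)
			rc = do_burst_read(addr, chunk);
		else
			rc = do_single_read(addr, chunk);
		if (rc)
			return rc;	/* abort on the first failure */

		/* advance the register address in data_type units */
		addr += chunk / data_type;
		total_bytes -= chunk;
	}
	return 0;
}

int main(void)
{
	queue_read(0x3500, 8, 1);	/* small: single read */
	queue_read(0x0000, 70000, 2);	/* large: split into two bursts */
	return 0;
}
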
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index da08bc7..c8ca85d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -58,18 +58,23 @@
 
 irqreturn_t cam_cci_irq(int irq_num, void *data)
 {
-	uint32_t irq;
+	uint32_t irq_status0 = 0;
+	uint32_t irq_status1 = 0;
 	struct cci_device *cci_dev = data;
 	struct cam_hw_soc_info *soc_info =
 		&cci_dev->soc_info;
 	void __iomem *base = soc_info->reg_map[0].mem_base;
 	unsigned long flags;
+	bool burst_read_assert = false;
 
-	irq = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
-	cam_io_w_mb(irq, base + CCI_IRQ_CLEAR_0_ADDR);
+	irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+	irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
+	cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
 	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
-	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+	CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1);
+	if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
 		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
 			cci_dev->cci_master_info[MASTER_0].reset_pending =
 				FALSE;
@@ -83,11 +88,24 @@
 			&cci_dev->cci_master_info[MASTER_1].reset_complete);
 		}
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+		burst_read_assert = true;
+	}
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(!burst_read_assert)) {
 		cci_dev->cci_master_info[MASTER_0].status = 0;
 		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+		(!burst_read_assert)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
@@ -104,7 +122,7 @@
 			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
@@ -121,11 +139,23 @@
 			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+		burst_read_assert = true;
+	}
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(!burst_read_assert)) {
 		cci_dev->cci_master_info[MASTER_1].status = 0;
 		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+		(!burst_read_assert)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
@@ -142,7 +172,7 @@
 			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 
 		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
@@ -159,27 +189,27 @@
 			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
 			flags);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
 		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
 		cam_io_w_mb(CCI_M0_RESET_RMSK,
 			base + CCI_RESET_CMD_ADDR);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
 		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
 		cam_io_w_mb(CCI_M1_RESET_RMSK,
 			base + CCI_RESET_CMD_ADDR);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
 		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
 		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
-		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq);
+		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq_status0);
 	}
-	if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
 		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
 		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
-		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq);
+		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0);
 	}
 	return IRQ_HANDLED;
 }
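
The interrupt handler now reads both status registers and treats a simultaneous RD_DONE plus read-threshold assertion as a single burst-read completion, skipping the individual paths via burst_read_assert. The sketch below reduces that decode to the MASTER_0 bits so the one-completion-per-interrupt behaviour is easy to see; complete_read() is a stand-in for completing reset_complete.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RD_DONE_BMSK      0x1		/* CCI_IRQ_STATUS_0_I2C_M0_RD_DONE */
#define RD_THRESHOLD_BMSK 0x10000	/* CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD */

static void complete_read(const char *why)
{
	printf("complete read: %s\n", why);
}

static void handle_irq(uint32_t status0, uint32_t status1)
{
	bool both = false;

	if ((status0 & RD_DONE_BMSK) && (status1 & RD_THRESHOLD_BMSK)) {
		complete_read("burst read: done + threshold");
		both = true;
	}
	if ((status0 & RD_DONE_BMSK) && !both)
		complete_read("single read done");
	if ((status1 & RD_THRESHOLD_BMSK) && !both)
		complete_read("FIFO threshold reached");
}

int main(void)
{
	handle_irq(RD_DONE_BMSK, 0);			/* one completion */
	handle_irq(RD_DONE_BMSK, RD_THRESHOLD_BMSK);	/* one completion */
	handle_irq(0, RD_THRESHOLD_BMSK);		/* one completion */
	return 0;
}
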
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 7cde619..d48ffd1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -65,19 +65,14 @@
 #define MAX_LRME_V4l2_EVENTS 30
 
 /* Max bytes that can be read per CCI read transaction */
-#define CCI_READ_MAX 12
+#define CCI_READ_MAX 256
 #define CCI_I2C_READ_MAX_RETRIES 3
 #define CCI_I2C_MAX_READ 8192
 #define CCI_I2C_MAX_WRITE 8192
+#define CCI_I2C_MAX_BYTE_COUNT 65535
 
 #define CAMX_CCI_DEV_NAME "cam-cci-driver"
 
-/* Max bytes that can be read per CCI read transaction */
-#define CCI_READ_MAX 12
-#define CCI_I2C_READ_MAX_RETRIES 3
-#define CCI_I2C_MAX_READ 8192
-#define CCI_I2C_MAX_WRITE 8192
-
 #define PRIORITY_QUEUE (QUEUE_0)
 #define SYNC_QUEUE (QUEUE_1)
 
@@ -125,6 +120,7 @@
 	uint16_t addr_type;
 	uint8_t *data;
 	uint16_t num_byte;
+	uint16_t data_type;
 };
 
 struct cam_cci_i2c_queue_info {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
index c18593e..31c8e26 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -43,27 +43,36 @@
 #define CCI_I2C_M0_Q0_LOAD_DATA_ADDR                                0x00000310
 #define CCI_IRQ_MASK_0_ADDR                                         0x00000c04
 #define CCI_IRQ_MASK_0_RMSK                                         0x7fff7ff7
+#define CCI_IRQ_MASK_1_ADDR                                         0x00000c10
+#define CCI_IRQ_MASK_1_RMSK                                         0x00110000
 #define CCI_IRQ_CLEAR_0_ADDR                                        0x00000c08
+#define CCI_IRQ_CLEAR_1_ADDR                                        0x00000c14
 #define CCI_IRQ_STATUS_0_ADDR                                       0x00000c0c
+#define CCI_IRQ_STATUS_1_ADDR                                       0x00000c18
 #define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK                   0x4000000
 #define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK                   0x2000000
 #define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK                           0x1000000
 #define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK                        0x100000
 #define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK                         0x10000
 #define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK                            0x1000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD                          0x100000
 #define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK                           0x100
 #define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK                            0x10
 #define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK                          0x18000EE6
 #define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK                          0x60EE6000
 #define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK                               0x1
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD                           0x10000
+#define CCI_I2C_M0_RD_THRESHOLD_ADDR                                0x00000120
+#define CCI_I2C_M1_RD_THRESHOLD_ADDR                                0x00000220
+#define CCI_I2C_RD_THRESHOLD_VALUE                                        0x38
 #define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR                               0x00000c00
 
 #define DEBUG_TOP_REG_START                                                0x0
 #define DEBUG_TOP_REG_COUNT                                                 14
 #define DEBUG_MASTER_REG_START                                           0x100
-#define DEBUG_MASTER_REG_COUNT                                               8
+#define DEBUG_MASTER_REG_COUNT                                               9
 #define DEBUG_MASTER_QUEUE_REG_START                                     0x300
-#define DEBUG_MASTER_QUEUE_REG_COUNT                                         6
+#define DEBUG_MASTER_QUEUE_REG_COUNT                                         7
 #define DEBUG_INTR_REG_START                                             0xC00
 #define DEBUG_INTR_REG_COUNT                                                 7
 #endif /* _CAM_CCI_HWREG_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 295259d..e0b27ca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -146,6 +146,10 @@
 		base + CCI_IRQ_MASK_0_ADDR);
 	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
 		base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+		base + CCI_IRQ_MASK_1_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+		base + CCI_IRQ_CLEAR_1_ADDR);
 	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 
 	for (i = 0; i < MASTER_MAX; i++) {
@@ -157,6 +161,13 @@
 			flush_workqueue(cci_dev->write_wq[i]);
 		}
 	}
+
+	/* Set RD FIFO threshold for M0 & M1 */
+	cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+		base + CCI_I2C_M0_RD_THRESHOLD_ADDR);
+	cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+		base + CCI_I2C_M1_RD_THRESHOLD_ADDR);
+
 	cci_dev->cci_state = CCI_STATE_ENABLED;
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index dbbac08..2688cd5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -614,6 +614,13 @@
 
 		if (csiphy_dev->acquire_count == 0)
 			csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
+
+		if (csiphy_dev->config_count == 0) {
+			CAM_DBG(CAM_CSIPHY, "reset csiphy_info");
+			csiphy_dev->csiphy_info.lane_mask = 0;
+			csiphy_dev->csiphy_info.lane_cnt = 0;
+			csiphy_dev->csiphy_info.combo_mode = 0;
+		}
 	}
 		break;
 	case CAM_CONFIG_DEV: {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
index 3f743fc..3245093 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -152,7 +152,7 @@
 		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0060, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -168,7 +168,7 @@
 		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0760, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -183,7 +183,7 @@
 		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0260, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -198,7 +198,7 @@
 		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0460, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -214,7 +214,7 @@
 		{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0660, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 30b9d96..7f94f8d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -109,6 +109,7 @@
 			rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
 				emap[j].mem.addr, memptr,
 				emap[j].mem.addr_type,
+				emap[j].mem.data_type,
 				emap[j].mem.valid_size);
 			if (rc) {
 				CAM_ERR(CAM_EEPROM, "read failed rc %d",
@@ -314,8 +315,8 @@
 power_down:
 	cam_eeprom_power_down(e_ctrl);
 data_mem_free:
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
+	vfree(e_ctrl->cal_data.mapdata);
+	vfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
@@ -542,9 +543,9 @@
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
 
-	e_ctrl->cal_data.map = kcalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
-		MSM_EEPROM_MAX_MEM_MAP_CNT),
-		(sizeof(struct cam_eeprom_memory_map_t)), GFP_KERNEL);
+	e_ctrl->cal_data.map = vzalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+		MSM_EEPROM_MAX_MEM_MAP_CNT) *
+		(sizeof(struct cam_eeprom_memory_map_t)));
 	if (!e_ctrl->cal_data.map) {
 		rc = -ENOMEM;
 		CAM_ERR(CAM_EEPROM, "failed");
@@ -737,8 +738,8 @@
 				return rc;
 			}
 			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
-			kfree(e_ctrl->cal_data.mapdata);
-			kfree(e_ctrl->cal_data.map);
+			vfree(e_ctrl->cal_data.mapdata);
+			vfree(e_ctrl->cal_data.map);
 			e_ctrl->cal_data.num_data = 0;
 			e_ctrl->cal_data.num_map = 0;
 			CAM_DBG(CAM_EEPROM,
@@ -753,7 +754,7 @@
 		}
 
 		e_ctrl->cal_data.mapdata =
-			kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+			vzalloc(e_ctrl->cal_data.num_data);
 		if (!e_ctrl->cal_data.mapdata) {
 			rc = -ENOMEM;
 			CAM_ERR(CAM_EEPROM, "failed");
@@ -778,8 +779,12 @@
 		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
 		rc = cam_eeprom_power_down(e_ctrl);
 		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
-		kfree(e_ctrl->cal_data.mapdata);
-		kfree(e_ctrl->cal_data.map);
+		vfree(e_ctrl->cal_data.mapdata);
+		vfree(e_ctrl->cal_data.map);
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
 		e_ctrl->cal_data.num_data = 0;
 		e_ctrl->cal_data.num_map = 0;
 		break;
@@ -790,11 +795,13 @@
 power_down:
 	cam_eeprom_power_down(e_ctrl);
 memdata_free:
-	kfree(e_ctrl->cal_data.mapdata);
+	vfree(e_ctrl->cal_data.mapdata);
 error:
 	kfree(power_info->power_setting);
 	kfree(power_info->power_down_setting);
-	kfree(e_ctrl->cal_data.map);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	vfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
@@ -829,6 +836,8 @@
 
 		kfree(power_info->power_setting);
 		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
 	}
 
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
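
Several teardown paths in this series (the eeprom hunks above, and the actuator/ois/sensor hunks nearby) now free the power settings and immediately set the pointers to NULL, while the large calibration map moves from kzalloc to vzalloc. The userspace sketch below shows only the free-and-clear part of that pattern, with free() standing in for kfree()/vfree(); release_power_settings() is a hypothetical helper, not a driver function.

#include <stdio.h>
#include <stdlib.h>

struct power_ctrl {
	int *power_setting;
	int *power_down_setting;
};

static void release_power_settings(struct power_ctrl *pc)
{
	/* free() and kfree() both accept NULL, so repeats are safe */
	free(pc->power_setting);
	free(pc->power_down_setting);
	pc->power_setting = NULL;
	pc->power_down_setting = NULL;
}

int main(void)
{
	struct power_ctrl pc = {
		.power_setting = calloc(4, sizeof(int)),
		.power_down_setting = calloc(4, sizeof(int)),
	};

	release_power_settings(&pc);	/* normal teardown */
	release_power_settings(&pc);	/* second call: no double free */
	printf("teardown ran twice without fault\n");
	return 0;
}
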
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 196df08..dfcb9fc 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -636,7 +636,11 @@
 
 void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
 {
-	int rc;
+	int rc = 0;
+	struct cam_ois_soc_private  *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&soc_private->power_info;
 
 	if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
 		return;
@@ -645,6 +649,7 @@
 		rc = cam_ois_power_down(o_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_OIS, "OIS Power down failed");
+		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
 	}
 
 	if (o_ctrl->cam_ois_state >= CAM_OIS_ACQUIRE) {
@@ -656,6 +661,11 @@
 		o_ctrl->bridge_intf.session_hdl = -1;
 	}
 
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+
 	o_ctrl->cam_ois_state = CAM_OIS_INIT;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index d58834c..2133932 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -518,6 +518,8 @@
 
 	kfree(power_info->power_setting);
 	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
 
 	s_ctrl->streamon_count = 0;
 	s_ctrl->streamoff_count = 0;
@@ -584,24 +586,6 @@
 				"Already Sensor Probed in the slot");
 			break;
 		}
-		/* Allocate memory for power up setting */
-		pu = kzalloc(sizeof(struct cam_sensor_power_setting) *
-			MAX_POWER_CONFIG, GFP_KERNEL);
-		if (!pu) {
-			rc = -ENOMEM;
-			goto release_mutex;
-		}
-
-		pd = kzalloc(sizeof(struct cam_sensor_power_setting) *
-			MAX_POWER_CONFIG, GFP_KERNEL);
-		if (!pd) {
-			kfree(pu);
-			rc = -ENOMEM;
-			goto release_mutex;
-		}
-
-		power_info->power_setting = pu;
-		power_info->power_down_setting = pd;
 
 		if (cmd->handle_type ==
 			CAM_HANDLE_MEM_HANDLE) {
@@ -618,6 +602,9 @@
 			return -EINVAL;
 		}
 
+		pu = power_info->power_setting;
+		pd = power_info->power_down_setting;
+
 		/* Parse and fill vreg params for powerup settings */
 		rc = msm_camera_fill_vreg_params(
 			&s_ctrl->soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 2c1f520..1ec7d9a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,7 @@
 	cci_ctrl.cci_info = cci_client;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
 	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
 	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
 	rc = v4l2_subdev_call(cci_client->cci_subdev,
@@ -59,6 +60,7 @@
 int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
 	uint32_t num_byte)
 {
 	int32_t                    rc = -EFAULT;
@@ -67,6 +69,7 @@
 	struct cam_cci_ctrl        cci_ctrl;
 
 	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
 		|| (num_byte > I2C_REG_DATA_MAX)) {
 		CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
 			num_byte);
@@ -81,6 +84,7 @@
 	cci_ctrl.cci_info = cci_client;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
 	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
 	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
 	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
 	cci_ctrl.status = -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 7cddcf9..e79fffb 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
  * @addr: I2c address
  * @data: I2C data
  * @addr_type: I2c address type
+ * @data_type: I2c data type
  * @num_byte: number of bytes
  *
  * This API handles CCI sequential read
@@ -53,6 +54,7 @@
 int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
 	uint32_t num_byte);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 89aad4e..ed490fd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -69,11 +69,12 @@
 
 int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
 	uint32_t addr, uint8_t *data,
-	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type, int32_t num_bytes)
 {
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
-			addr, data, addr_type, num_bytes);
+			addr, data, addr_type, data_type, num_bytes);
 	} else if (io_master_info->master_type == I2C_MASTER) {
 		return cam_qup_i2c_read_seq(io_master_info->client,
 			addr, data, addr_type, num_bytes);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index ec5ed25..f1143c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
  * @io_master_info: I2C/SPI master information
  * @addr: I2C address
  * @data: I2C data
+ * @addr_type: I2C addr type
  * @data_type: I2C data type
  * @num_bytes: number of bytes
  *
@@ -60,6 +61,7 @@
 int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
 	int32_t num_bytes);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 10d29c9..73a0cf7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -860,8 +860,10 @@
 	return rc;
 free_power_down_settings:
 	kfree(power_info->power_down_setting);
+	power_info->power_down_setting = NULL;
 free_power_settings:
 	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
 	return rc;
 }
 
@@ -1304,7 +1306,9 @@
 		CAM_DBG(CAM_SENSOR, "index: %d", index);
 		power_setting = &ctrl->power_setting[index];
 		if (!power_setting) {
-			CAM_ERR(CAM_SENSOR, "Invalid power up settings");
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power up settings for index %d",
+				index);
 			return -EINVAL;
 		}
 
@@ -1548,7 +1552,7 @@
 		if (ret)
 			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
 		cam_res_mgr_shared_pinctrl_select_state(false);
-		pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 		cam_res_mgr_shared_pinctrl_put();
 	}
 
@@ -1594,11 +1598,6 @@
 
 	pd = &ctrl->power_down_setting[index];
 
-	if (!pd) {
-		CAM_ERR(CAM_SENSOR, "Invalid power down setting");
-		return -EINVAL;
-	}
-
 	for (j = 0; j < num_vreg; j++) {
 		if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
 
@@ -1641,7 +1640,7 @@
 {
 	int index = 0, ret = 0, num_vreg = 0, i;
 	struct cam_sensor_power_setting *pd = NULL;
-	struct cam_sensor_power_setting *ps;
+	struct cam_sensor_power_setting *ps = NULL;
 	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
 	CAM_DBG(CAM_SENSOR, "Enter");
@@ -1661,6 +1660,13 @@
 	for (index = 0; index < ctrl->power_down_setting_size; index++) {
 		CAM_DBG(CAM_SENSOR, "index %d",  index);
 		pd = &ctrl->power_down_setting[index];
+		if (!pd) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power down settings for index %d",
+				index);
+			return -EINVAL;
+		}
+
 		ps = NULL;
 		CAM_DBG(CAM_SENSOR, "type %d",  pd->seq_type);
 		switch (pd->seq_type) {
@@ -1760,7 +1766,7 @@
 			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
 
 		cam_res_mgr_shared_pinctrl_select_state(false);
-		pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 		cam_res_mgr_shared_pinctrl_put();
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ecfc566..5cd3008 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -3343,6 +3343,7 @@
 		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
 		if (rc < 0) {
 			CAM_ERR(CAM_SMMU, "Error: populating context banks");
+			cam_smmu_release_cb(pdev);
 			return -ENOMEM;
 		}
 		return rc;
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index cabc612..e5439d8 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -95,13 +95,22 @@
           and also provides support for writing data in case of FLASH ROM.
 	  Currently supports I2C, CCI and SPI protocol
 
+config MSM_ISP_V1
+        bool "QTI MSM Image Signal Processing interface support"
+        depends on MSMB_CAMERA
+        ---help---
+          Enable support for Image Signal Processing interface module.
+          This module acts as a crossbar between CSID and VFE. Output
+          of any CID of CSID can be routed to either the pix or raw
+          data interface in VFE.
+
 config MSM_ISPIF
         bool "QTI MSM Image Signal Processing interface support"
         depends on MSMB_CAMERA
         ---help---
           Enable support for Image Signal Processing interface module.
           This module acts as a crossbar between CSID and VFE. Output
-          of any CID of CSID can be routed to of of pix or raw
+          of any CID of CSID can be routed to either the pix or raw
           data interface in VFE.
 
 config MSM_ISPIF_V1
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
index 22f4891..d32a0f2 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
@@ -20,8 +20,6 @@
 #define EMPTY_QSEECOM_HANDLE    NULL
 #define QSEECOM_SBUFF_SIZE      SZ_128K
 
-#define MSM_CAMERA_TZ_UTIL_VERBOSE
-
 #define MSM_CAMERA_TZ_BOOT_PROTECTED (false)
 
 /* Update version major number in case the HLOS-TA interface is changed*/
diff --git a/drivers/media/platform/msm/camera_v2/isp/Makefile b/drivers/media/platform/msm/camera_v2/isp/Makefile
index 621d81d..d36b1e2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/Makefile
+++ b/drivers/media/platform/msm/camera_v2/isp/Makefile
@@ -1,5 +1,10 @@
 ccflags-y += -Idrivers/media/platform/msm/camera_v2
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/common/
+ifeq ($(CONFIG_MSM_ISP_V1),y)
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp_32.o msm_buf_mgr.o msm_isp_util_32.o msm_isp_axi_util_32.o msm_isp_stats_util_32.o
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp32.o
+else
 obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
 obj-$(CONFIG_MSMB_CAMERA) += msm_isp48.o msm_isp47.o msm_isp46.o msm_isp44.o msm_isp40.o msm_isp.o
+endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 691b492..6196a8c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -189,7 +189,9 @@
 	int i, rc = -1;
 	int ret;
 	struct msm_isp_buffer_mapped_info *mapped_info;
+#ifndef CONFIG_MSM_ISP_V1
 	uint32_t accu_length = 0;
+#endif
 	struct msm_isp_bufq *bufq = NULL;
 
 	bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
@@ -228,8 +230,12 @@
 			goto get_phy_err;
 		}
 
+#ifdef CONFIG_MSM_ISP_V1
+		mapped_info->paddr += qbuf_buf->planes[i].offset;
+#else
 		mapped_info->paddr += accu_length;
 		accu_length += qbuf_buf->planes[i].length;
+#endif
 
 		CDBG("%s: plane: %d addr:%pK\n",
 			__func__, i, (void *)mapped_info->paddr);
@@ -732,10 +738,17 @@
 	spin_lock_irqsave(&bufq->bufq_lock, flags);
 
 	buf_info->frame_id = frame_id;
+#ifdef CONFIG_MSM_ISP_V1
+	if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED) {
+		buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+		buf_info->tv = tv;
+	}
+#else
 	if (BUF_SRC(bufq->stream_id) == MSM_ISP_BUFFER_SRC_NATIVE) {
 		buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
 		buf_info->tv = tv;
 	}
+#endif
 	spin_unlock_irqrestore(&bufq->bufq_lock, flags);
 	return 0;
 }
@@ -1077,7 +1090,6 @@
 	}
 }
 
-
 /**
  * msm_isp_buf_put_scratch() - Release scratch buffers
  * @buf_mgr: The buffer structure for h/w
@@ -1220,7 +1232,6 @@
 	return rc;
 }
 
-
 static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
 	const char *ctx_name)
 {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 6da1360..a95917c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -12,21 +12,15 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
-
 #include "msm_isp32.h"
-#include "msm_isp_util.h"
-#include "msm_isp_axi_util.h"
-#include "msm_isp_stats_util.h"
-#include "msm_isp.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_isp_32.h"
 #include "msm.h"
 #include "msm_camera_io_util.h"
 
-static const struct platform_device_id msm_vfe32_dev_id[] = {
-	{"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
-	{}
-};
-
-#define VFE32_BURST_LEN 2
+#define VFE32_BURST_LEN 3
 #define VFE32_UB_SIZE 1024
 #define VFE32_UB_SIZE_32KB 2048
 #define VFE32_EQUAL_SLICE_UB 194
@@ -36,7 +30,7 @@
 #define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
 #define VFE32_XBAR_SHIFT(idx) ((idx % 4) * 8)
 #define VFE32_PING_PONG_BASE(wm, ping_pong) \
-	(VFE32_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
+	(VFE32_WM_BASE(wm) + 0x4 * (1 + (~(ping_pong >> wm) & 0x1)))
 
 static uint8_t stats_pingpong_offset_map[] = {
 	7, 8, 9, 10, 11, 12, 13};
@@ -60,16 +54,6 @@
 	{"csi_vfe_clk", -1},
 };
 
-static uint32_t msm_vfe32_ub_reg_offset(struct vfe_device *vfe_dev, int idx)
-{
-	return (VFE32_WM_BASE(idx) + 0x10);
-}
-
-static uint32_t msm_vfe32_get_ub_size(struct vfe_device *vfe_dev)
-{
-	return MSM_ISP32_TOTAL_WM_UB;
-}
-
 static int32_t msm_vfe32_init_qos_parms(struct vfe_device *vfe_dev,
 				struct msm_vfe_hw_init_parms *qos_parms,
 				struct msm_vfe_hw_init_parms *ds_parms)
@@ -284,8 +268,6 @@
 		pr_err("%s: vfe ioremap failed\n", __func__);
 		goto vfe_remap_failed;
 	}
-	vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
-		vfe_dev->vfe_base;
 
 	vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
 		resource_size(vfe_dev->vfe_vbif_mem));
@@ -330,14 +312,12 @@
 
 static void msm_vfe32_release_hardware(struct vfe_device *vfe_dev)
 {
-	msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1C);
-	msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x20);
-	disable_irq(vfe_dev->vfe_irq->start);
 	free_irq(vfe_dev->vfe_irq->start, vfe_dev);
 	tasklet_kill(&vfe_dev->vfe_tasklet);
-	msm_isp_flush_tasklet(vfe_dev);
 	iounmap(vfe_dev->vfe_vbif_base);
 	vfe_dev->vfe_vbif_base = NULL;
+	iounmap(vfe_dev->vfe_base);
+	vfe_dev->vfe_base = NULL;
 	if (vfe_dev->vfe_clk_idx == 1)
 		msm_cam_clk_enable(&vfe_dev->pdev->dev,
 				msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
@@ -346,9 +326,6 @@
 		msm_cam_clk_enable(&vfe_dev->pdev->dev,
 				msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
 				ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
-	vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
-	iounmap(vfe_dev->vfe_base);
-	vfe_dev->vfe_base = NULL;
 	kfree(vfe_dev->vfe_clk);
 	regulator_disable(vfe_dev->fs_vfe);
 	msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
@@ -369,7 +346,6 @@
 	ds_parms.entries = "ds-entries";
 	ds_parms.regs = "ds-regs";
 	ds_parms.settings = "ds-settings";
-
 	msm_vfe32_init_qos_parms(vfe_dev, &qos_parms, &ds_parms);
 	msm_vfe32_init_vbif_parms(vfe_dev, &vbif_parms);
 
@@ -381,6 +357,11 @@
 	msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
 	msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
 	msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x6FC);
+	msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(1));
+	msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(2));
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(0));
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(4));
 
 }
 
@@ -396,13 +377,30 @@
 static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1)
 {
-	if (irq_status1 & BIT(23))
+	if (irq_status1 & BIT(23)) {
+		if (vfe_dev->vfe_reset_timeout_processed == 1) {
+			pr_err("%s: vfe reset timeout already handled\n",
+				__func__);
+			return;
+		}
 		complete(&vfe_dev->reset_complete);
+	}
 }
 
 static void msm_vfe32_process_halt_irq(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1)
 {
+	if (irq_status1 & (1 << 24))
+		msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
+}
+
+static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts)
+{
+	if (!(irq_status0 & 0x18))
+		return;
+	if (irq_status0 & (1 << 3))
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
 }
 
 static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
@@ -416,10 +414,12 @@
 		ISP_DBG("%s: SOF IRQ\n", __func__);
 		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
 			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
-			stream_count == 0) {
+			pix_stream_count == 0) {
 			msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
-			msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0, ts);
-			msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+			if (vfe_dev->axi_data.stream_update)
+				msm_isp_axi_stream_update(vfe_dev,
+					(1 << VFE_PIX_0));
+			msm_isp_update_framedrop_reg(vfe_dev, (1 << VFE_PIX_0));
 		}
 	}
 }
@@ -485,7 +485,20 @@
 
 static void msm_vfe32_get_overflow_mask(uint32_t *overflow_mask)
 {
-	*overflow_mask = 0x0;
+	*overflow_mask = 0x003FFF7E;
+}
+
+static void msm_vfe32_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+	uint32_t *rdi_wm_mask)
+{
+	*rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe32_get_irq_mask(struct vfe_device *vfe_dev,
+	uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+	*irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+	*irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
 }
 
 static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
@@ -571,7 +584,7 @@
 		pr_err("%s: axi error\n", __func__);
 }
 
-static void msm_vfe32_read_and_clear_irq_status(struct vfe_device *vfe_dev,
+static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
 	uint32_t *irq_status0, uint32_t *irq_status1)
 {
 	*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
@@ -589,122 +602,127 @@
 			msm_camera_io_r(vfe_dev->vfe_base + 0x7B4);
 }
 
-static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
-	uint32_t *irq_status0, uint32_t irq_status1)
-{
-	*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
-	*irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
-}
-
 static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
 	uint32_t irq_status0, uint32_t irq_status1,
 	struct msm_isp_timestamp *ts)
 {
-	uint32_t rdi_status;
-	enum msm_vfe_input_src i;
+	uint8_t input_src = 0x0;
 
 	if (!(irq_status0 & 0x20) && !(irq_status1 & 0x1C000000))
 		return;
 
 	if (irq_status0 & BIT(5)) {
-		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
-		vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
-			VFE_PIX_0);
-		if (vfe_dev->axi_data.stream_update[VFE_PIX_0]) {
-			rdi_status = msm_camera_io_r(vfe_dev->vfe_base +
-				VFE32_XBAR_BASE(0));
-			rdi_status |= msm_camera_io_r(vfe_dev->vfe_base +
-				VFE32_XBAR_BASE(4));
-
-			if ((rdi_status & BIT(7)) && (!(irq_status0 & 0x20)))
-				return;
-		}
-		msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
-				MSM_ISP_COMP_IRQ_REG_UPD);
+		msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE, VFE_PIX_0, ts);
+		input_src |= (1 << VFE_PIX_0);
+	}
+	if (irq_status1 & BIT(26)) {
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_0, ts);
+		input_src |= (1 << VFE_RAW_0);
+	}
+	if (irq_status1 & BIT(27)) {
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_1, ts);
+		input_src |= (1 << VFE_RAW_1);
+	}
+	if (irq_status1 & BIT(28)) {
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_2, ts);
+		input_src |= (1 << VFE_RAW_2);
 	}
 
-	for (i = VFE_RAW_0; i <= VFE_RAW_2; i++) {
-		if (irq_status1 & BIT(26 + (i - VFE_RAW_0))) {
-			msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
-			msm_isp_axi_stream_update(vfe_dev, i, ts);
-			msm_isp_update_framedrop_reg(vfe_dev, i);
-
-			vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
-				i);
+	if (vfe_dev->axi_data.stream_update)
+		msm_isp_axi_stream_update(vfe_dev, input_src);
+	if (atomic_read(&vfe_dev->stats_data.stats_update))
+		msm_isp_stats_stream_update(vfe_dev);
+	if (vfe_dev->axi_data.stream_update ||
+		atomic_read(&vfe_dev->stats_data.stats_update)) {
+		if (input_src & (1 << VFE_PIX_0)) {
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, (1 << VFE_PIX_0));
 		}
 	}
-
+	msm_isp_update_framedrop_reg(vfe_dev, input_src);
+	msm_isp_update_stats_framedrop_reg(vfe_dev);
 	msm_isp_update_error_frame_count(vfe_dev);
-
-}
-
-static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
-	uint32_t irq_status0, uint32_t irq_status1,
-	struct msm_isp_timestamp *ts)
-{
-	/* Not supported */
-}
-
-static void msm_vfe32_reg_update(struct vfe_device *vfe_dev,
-	enum msm_vfe_input_src frame_src)
-{
-	if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) {
-		msm_camera_io_w_mb(0xF,
-			vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
-			+ 0x260);
-		msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
-	} else if (!vfe_dev->is_split) {
-		msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
+	if ((input_src & (1 << VFE_RAW_0)) ||
+		(input_src & (1 << VFE_RAW_1)) ||
+		(input_src & (1 << VFE_RAW_2))) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		reg_update(vfe_dev, input_src);
 	}
+
+	return;
+}
+
+static void msm_vfe32_reg_update(
+	struct vfe_device *vfe_dev, uint32_t input_src)
+{
+	msm_camera_io_w_mb(input_src, vfe_dev->vfe_base + 0x260);
 }
 
 static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev,
 	uint32_t first_start, uint32_t blocking)
 {
-	init_completion(&vfe_dev->reset_complete);
-	msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
-	return wait_for_completion_timeout(
-	   &vfe_dev->reset_complete, msecs_to_jiffies(50));
+	long rc = 0;
+	uint32_t irq_status1;
+
+	if (blocking) {
+		init_completion(&vfe_dev->reset_complete);
+		msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
+		vfe_dev->vfe_reset_timeout_processed = 0;
+		rc = wait_for_completion_timeout(
+			&vfe_dev->reset_complete, msecs_to_jiffies(500));
+	} else {
+		msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
+	}
+
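+	/* If the blocking wait timed out, check whether the reset-done bit
+	 * (irq_status1 BIT(23)) was latched anyway; if so, the reset actually
+	 * completed and the timeout is treated as success.
+	 */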
+	if (blocking && rc <= 0) {
+		/*read ISP status register*/
+		irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+		pr_err("%s: handling vfe reset timeout. irq_status1 0x%x\n",
+			__func__, irq_status1);
+		if (irq_status1 & BIT(23)) {
+			pr_err("%s: vfe reset actually completed\n", __func__);
+			vfe_dev->vfe_reset_timeout_processed = 1;
+			return 1;
+		}
+	}
+	return rc;
 }
 
 static void msm_vfe32_axi_reload_wm(
-	struct vfe_device *vfe_dev, void __iomem *vfe_base,
-	uint32_t reload_mask)
+	struct vfe_device *vfe_dev, uint32_t reload_mask)
 {
 	if (!vfe_dev->pdev->dev.of_node) {
 		/*vfe32 A-family: 8960*/
-		msm_camera_io_w_mb(reload_mask, vfe_base + 0x38);
+		msm_camera_io_w_mb(reload_mask, vfe_dev->vfe_base + 0x38);
 	} else {
 		/*vfe32 B-family: 8610*/
-		msm_camera_io_w(0x0, vfe_base + 0x24);
-		msm_camera_io_w(0x0, vfe_base + 0x28);
-		msm_camera_io_w(0x0, vfe_base + 0x20);
-		msm_camera_io_w_mb(0x1, vfe_base + 0x18);
-		msm_camera_io_w(0x9AAAAAAA, vfe_base + 0x600);
-		msm_camera_io_w(reload_mask, vfe_base + 0x38);
+		msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x28);
+		msm_camera_io_w(0x1C800000, vfe_dev->vfe_base + 0x20);
+		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x18);
+		msm_camera_io_w(0x9AAAAAAA, vfe_dev->vfe_base + 0x600);
+		msm_camera_io_w(reload_mask, vfe_dev->vfe_base + 0x38);
 	}
 }
 
-static void msm_vfe32_axi_enable_wm(void __iomem *vfe_base,
+static void msm_vfe32_axi_enable_wm(struct vfe_device *vfe_dev,
 	uint8_t wm_idx, uint8_t enable)
 {
 	uint32_t val = msm_camera_io_r(
-	   vfe_base + VFE32_WM_BASE(wm_idx));
+	   vfe_dev->vfe_base + VFE32_WM_BASE(wm_idx));
 	if (enable)
 		val |= 0x1;
 	else
 		val &= ~0x1;
 	msm_camera_io_w_mb(val,
-		vfe_base + VFE32_WM_BASE(wm_idx));
+		vfe_dev->vfe_base + VFE32_WM_BASE(wm_idx));
 }
 
 static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info)
 {
 	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 	uint32_t comp_mask, comp_mask_index =
-		stream_info->comp_mask_index[vfe_idx];
+		stream_info->comp_mask_index;
 	uint32_t irq_mask;
 
 	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -721,9 +739,7 @@
 static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint32_t comp_mask, comp_mask_index =
-			stream_info->comp_mask_index[vfe_idx];
+	uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
 	uint32_t irq_mask;
 
 	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -739,10 +755,9 @@
 	struct msm_vfe_axi_stream *stream_info)
 {
 	uint32_t irq_mask;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-	irq_mask |= BIT(stream_info->wm[vfe_idx][0] + 6);
+	irq_mask |= BIT(stream_info->wm[0] + 6);
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
 }
 
@@ -750,31 +765,40 @@
 	struct msm_vfe_axi_stream *stream_info)
 {
 	uint32_t irq_mask;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-	irq_mask &= ~BIT(stream_info->wm[vfe_idx][0] + 6);
+	irq_mask &= ~BIT(stream_info->wm[0] + 6);
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
 }
 
 static void msm_vfe32_cfg_framedrop(struct vfe_device *vfe_dev,
-	struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
-	uint32_t framedrop_period)
+	struct msm_vfe_axi_stream *stream_info)
 {
-	void __iomem *vfe_base = vfe_dev->vfe_base;
+	uint32_t framedrop_pattern = 0, framedrop_period = 0;
+
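+	/* Pattern/period stay 0 (nothing written) until the initial
+	 * frame-drop count has elapsed; for burst streams they are also
+	 * cleared once the burst frame count is exhausted.
+	 */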
+	if (stream_info->runtime_init_frame_drop == 0) {
+		framedrop_pattern = stream_info->framedrop_pattern;
+		framedrop_period = stream_info->framedrop_period;
+	}
+
+	if (stream_info->stream_type == BURST_STREAM &&
+		stream_info->runtime_burst_frame_count == 0) {
+		framedrop_pattern = 0;
+		framedrop_period = 0;
+	}
 
 	if (stream_info->stream_src == PIX_ENCODER) {
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x504);
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x508);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x50C);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x510);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x504);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x508);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x50C);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x510);
 	} else if (stream_info->stream_src == PIX_VIEWFINDER) {
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x514);
-		msm_camera_io_w(framedrop_period - 1, vfe_base + 0x518);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x51C);
-		msm_camera_io_w(framedrop_pattern, vfe_base + 0x520);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x514);
+		msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x518);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x51C);
+		msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x520);
 	}
-	msm_camera_io_w_mb(0x1, vfe_base + 0x260);
+	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x260);
 }
 
 static void msm_vfe32_clear_framedrop(struct vfe_device *vfe_dev,
@@ -877,6 +901,7 @@
 	struct msm_vfe_pix_cfg *pix_cfg)
 {
 	pr_err("%s: Fetch engine not supported\n", __func__);
+	return;
 }
 
 static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
@@ -924,6 +949,7 @@
 		pr_err("%s: Unsupported input mux %d\n",
 			__func__, pix_cfg->input_mux);
 	}
+	return;
 }
 
 static void msm_vfe32_update_camif_state(
@@ -938,19 +964,20 @@
 
 	if (update_state == ENABLE_CAMIF) {
 		val = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-		val |= 0x1;
+		val |= 0x19;
 		msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x1C);
-
+		msm_camera_io_w_mb(0xA, vfe_dev->vfe_base + 0x200);
 		val = msm_camera_io_r(vfe_dev->vfe_base + 0x1E4);
 		bus_en =
 		((vfe_dev->axi_data.src_info[
 			VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
 		vfe_en =
 		((vfe_dev->axi_data.src_info[
-			VFE_PIX_0].stream_count > 0) ? 1 : 0);
+			VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
 		val &= 0xFFFFFF3F;
 		val = val | bus_en << 7 | vfe_en << 6;
 		msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
+		msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x1E0);
 		msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
 		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
 	} else if (update_state == DISABLE_CAMIF) {
@@ -990,17 +1017,16 @@
 	uint8_t plane_idx)
 {
 	uint32_t val;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
+	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
 
 	if (!stream_info->frame_based) {
 		/*WR_IMAGE_SIZE*/
 		val =
 			((msm_isp_cal_word_per_line(
 			stream_info->output_format,
-			stream_info->plane_cfg[vfe_idx][plane_idx].
+			stream_info->plane_cfg[plane_idx].
 			output_width)+1)/2 - 1) << 16 |
-			(stream_info->plane_cfg[vfe_idx][plane_idx].
+			(stream_info->plane_cfg[plane_idx].
 			output_height - 1);
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
 
@@ -1008,9 +1034,9 @@
 		val =
 			msm_isp_cal_word_per_line(
 			stream_info->output_format,
-			stream_info->plane_cfg[vfe_idx][plane_idx].
+			stream_info->plane_cfg[plane_idx].
 			output_stride) << 16 |
-			(stream_info->plane_cfg[vfe_idx][plane_idx].
+			(stream_info->plane_cfg[plane_idx].
 			output_height - 1) << 4 | VFE32_BURST_LEN;
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
 	} else {
@@ -1018,12 +1044,13 @@
 		val =
 			msm_isp_cal_word_per_line(
 			stream_info->output_format,
-			stream_info->plane_cfg[vfe_idx][plane_idx].
+			stream_info->plane_cfg[plane_idx].
 			output_width) << 16 |
-			(stream_info->plane_cfg[vfe_idx][plane_idx].
+			(stream_info->plane_cfg[plane_idx].
 			output_height - 1) << 4 | VFE32_BURST_LEN;
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
 	}
+	return;
 }
 
 static void msm_vfe32_axi_clear_wm_reg(
@@ -1031,22 +1058,24 @@
 	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
 {
 	uint32_t val = 0;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
+	uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+
+	/* FRAME BASED */
+	msm_camera_io_w(val, vfe_dev->vfe_base + wm_base);
 	/*WR_IMAGE_SIZE*/
 	msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
 	/*WR_BUFFER_CFG*/
 	msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+	return;
 }
 
 static void msm_vfe32_axi_cfg_wm_xbar_reg(
 	struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
 	struct msm_vfe_axi_plane_cfg *plane_cfg =
-		&stream_info->plane_cfg[vfe_idx][plane_idx];
-	uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
+		&stream_info->plane_cfg[plane_idx];
+	uint8_t wm = stream_info->wm[plane_idx];
 	uint32_t xbar_cfg = 0;
 	uint32_t xbar_reg_cfg = 0;
 
@@ -1093,14 +1122,14 @@
 	xbar_reg_cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
 	xbar_reg_cfg |= (xbar_cfg << VFE32_XBAR_SHIFT(wm));
 	msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+	return;
 }
 
 static void msm_vfe32_axi_clear_wm_xbar_reg(
 	struct vfe_device *vfe_dev,
 	struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
-	uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
+	uint8_t wm = stream_info->wm[plane_idx];
 	uint32_t xbar_reg_cfg = 0;
 
 	xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
@@ -1108,21 +1137,95 @@
 	msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
 }
 
-static void msm_vfe32_update_ping_pong_addr(void __iomem *vfe_base,
-	uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
-	int32_t buf_size)
+static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t ub_offset = 0;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t total_image_size = 0;
+	uint32_t num_used_wms = 0;
+	uint32_t prop_size = 0;
+	uint32_t wm_ub_size;
+	uint64_t delta;
+
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (axi_data->free_wm[i] > 0) {
+			num_used_wms++;
+			total_image_size += axi_data->wm_image_size[i];
+		}
+	}
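+	/* Every active WM is guaranteed min_wm_ub; the remaining UB space
+	 * (prop_size) is shared out in proportion to each WM's image size.
+	 */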
+	prop_size = MSM_ISP32_TOTAL_WM_UB -
+		axi_data->hw_info->min_wm_ub * num_used_wms;
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (axi_data->free_wm[i]) {
+			delta =
+				(uint64_t)(axi_data->wm_image_size[i] *
+					prop_size);
+			do_div(delta, total_image_size);
+			wm_ub_size = axi_data->hw_info->min_wm_ub +
+				(uint32_t)delta;
+			msm_camera_io_w(ub_offset << 16 |
+				(wm_ub_size - 1), vfe_dev->vfe_base +
+					VFE32_WM_BASE(i) + 0xC);
+			ub_offset += wm_ub_size;
+		} else {
+			msm_camera_io_w(0,
+				vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
+		}
+	}
+}
+
+static void msm_vfe32_cfg_axi_ub_equal_slicing(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t ub_offset = 0;
+	uint32_t final_ub_slice_size;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
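+	/* Give each WM a fixed VFE32_EQUAL_SLICE_UB slice, trimming the last
+	 * slice so the total never exceeds VFE32_AXI_SLICE_UB.
+	 */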
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (ub_offset + VFE32_EQUAL_SLICE_UB > VFE32_AXI_SLICE_UB) {
+			final_ub_slice_size = VFE32_AXI_SLICE_UB - ub_offset;
+			msm_camera_io_w(ub_offset << 16 |
+				(final_ub_slice_size - 1), vfe_dev->vfe_base +
+				VFE32_WM_BASE(i) + 0xC);
+			ub_offset += final_ub_slice_size;
+		} else {
+			msm_camera_io_w(ub_offset << 16 |
+				(VFE32_EQUAL_SLICE_UB - 1), vfe_dev->vfe_base +
+				VFE32_WM_BASE(i) + 0xC);
+			ub_offset += VFE32_EQUAL_SLICE_UB;
+		}
+	}
+}
+
+static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+	if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+		msm_vfe32_cfg_axi_ub_equal_slicing(vfe_dev);
+	else
+		msm_vfe32_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint32_t pingpong_status, dma_addr_t paddr)
 {
 	uint32_t paddr32 = (paddr & 0xFFFFFFFF);
 
-	msm_camera_io_w(paddr32, vfe_base +
-		VFE32_PING_PONG_BASE(wm_idx, pingpong_bit));
+	msm_camera_io_w(paddr32, vfe_dev->vfe_base +
+		VFE32_PING_PONG_BASE(wm_idx, pingpong_status));
 }
 
 static int msm_vfe32_axi_halt(struct vfe_device *vfe_dev, uint32_t blocking)
 {
-	uint32_t halt_mask;
 	uint32_t axi_busy_flag = true;
 
+	/* Keep only halt and restart mask */
+	msm_camera_io_w(0x01800000, vfe_dev->vfe_base + 0x20);
+	/*Clear IRQ Status */
+	msm_camera_io_w(0xFE7FFFFF, vfe_dev->vfe_base + 0x28);
 	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
 	while (axi_busy_flag) {
 		if (msm_camera_io_r(
@@ -1130,10 +1233,27 @@
 			axi_busy_flag = false;
 	}
 	msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
-	halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
-	halt_mask &= 0xFEFFFFFF;
-	/* Disable AXI IRQ */
-	msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x20);
+	return 0;
+}
+
+static int msm_vfe32_axi_restart(struct vfe_device *vfe_dev,
+	uint32_t blocking, uint32_t enable_camif)
+{
+	vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+
+	/*Clear IRQ Status */
+	msm_camera_io_w(0xFE7FFFFF, vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
+	msm_camera_io_w_mb(0xA, vfe_dev->vfe_base + 0x200);
+	/* Start AXI */
+	msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x1D8);
+	vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, 0xF);
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+	if (enable_camif) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		update_camif_state(vfe_dev, ENABLE_CAMIF);
+	}
 	return 0;
 }
 
@@ -1187,20 +1307,36 @@
 }
 
 static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
-	uint32_t stats_mask, uint8_t comp_idx, uint8_t enable)
+	uint32_t stats_mask, uint8_t enable)
 {
+	uint32_t i = 0;
+	atomic_t *stats_comp;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	stats_mask = stats_mask & 0x7F;
+
+	for (i = 0;
+		i < vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; i++) {
+		stats_comp = &stats_data->stats_comp_mask[i];
+		if (enable)
+			atomic_add(stats_mask, stats_comp);
+		else
+			atomic_sub(stats_mask, stats_comp);
+		ISP_DBG("%s: comp_mask: %x\n",
+			__func__, atomic_read(&stats_data->stats_comp_mask[i]));
+	}
+	return;
 }
 
 static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
-	int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
-				stream_info);
 	uint32_t irq_mask;
 
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
-	irq_mask |= BIT(STATS_IDX(stream_info->stream_handle[vfe_idx]) + 13);
+	irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+	return;
 }
 
 static void msm_vfe32_stats_clear_wm_irq_mask(struct vfe_device *vfe_dev,
@@ -1211,18 +1347,21 @@
 	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
 	irq_mask &= ~(BIT(STATS_IDX(stream_info->stream_handle) + 13));
 	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+	return;
 }
 
 static void msm_vfe32_stats_cfg_wm_reg(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
 	/*Nothing to configure for VFE3.x*/
+	return;
 }
 
 static void msm_vfe32_stats_clear_wm_reg(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
 	/*Nothing to configure for VFE3.x*/
+	return;
 }
 
 static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
@@ -1247,12 +1386,7 @@
 		msm_camera_io_w(ub_offset << 16 | (ub_size[i] - 1),
 			vfe_dev->vfe_base + VFE32_STATS_BASE(i) + 0x8);
 	}
-}
-
-static bool msm_vfe32_is_module_cfg_lock_needed(
-	uint32_t reg_offset)
-{
-	return false;
+	return;
 }
 
 static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
@@ -1294,15 +1428,13 @@
 
 static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
-	dma_addr_t paddr, uint32_t buf_sz)
+	dma_addr_t paddr)
 {
-	void __iomem *vfe_base = vfe_dev->vfe_base;
-	int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
-				stream_info);
 	uint32_t paddr32 = (paddr & 0xFFFFFFFF);
-	int stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
 
-	msm_camera_io_w(paddr32, vfe_base +
+	int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+	msm_camera_io_w(paddr32, vfe_dev->vfe_base +
 		VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
 }
 
@@ -1361,28 +1493,6 @@
 		goto vfe_no_resource;
 	}
 
-	if (!vfe_dev->pdev->dev.of_node)
-		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
-	else
-		vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe");
-
-	if (!vfe_dev->iommu_ctx[0]) {
-		pr_err("%s: no iommux ctx resource?\n", __func__);
-		rc = -ENODEV;
-		goto vfe_no_resource;
-	}
-
-	if (!vfe_dev->pdev->dev.of_node)
-		vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
-	else
-		vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe");
-
-	if (!vfe_dev->iommu_ctx[1]) {
-		pr_err("%s: no iommux ctx resource?\n", __func__);
-		rc = -ENODEV;
-		goto vfe_no_resource;
-	}
-
 vfe_no_resource:
 	return rc;
 }
@@ -1394,13 +1504,27 @@
 	*error_mask1 = 0x007FFFFF;
 }
 
-struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
-	.num_wm = 5,
+
+static void msm_vfe32_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+		vfe_dev->vfe_base + 0x1C);
+	msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+		vfe_dev->vfe_base + 0x20);
+}
+
+static void msm_vfe32_get_halt_restart_mask(uint32_t *irq0_mask,
+	uint32_t *irq1_mask)
+{
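+	/* Only the IRQ1 halt/restart bits (0x01800000) are reported;
+	 * irq0_mask is left unchanged here.
+	 */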
+	*irq1_mask = 0x01800000;
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
+	.num_wm = 6,
 	.num_comp_mask = 3,
 	.num_rdi = 3,
 	.num_rdi_master = 3,
 	.min_wm_ub = 64,
-	.scratch_buf_range = SZ_32M,
 };
 
 static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
@@ -1412,7 +1536,22 @@
 		1 << MSM_ISP_STATS_SKIN | 1 << MSM_ISP_STATS_BHIST,
 	.stats_ping_pong_offset = stats_pingpong_offset_map,
 	.num_stats_type = VFE32_NUM_STATS_TYPE,
-	.num_stats_comp_mask = 0,
+	.num_stats_comp_mask = 1,
+};
+
+static struct v4l2_subdev_core_ops msm_vfe32_subdev_core_ops = {
+	.ioctl = msm_isp_ioctl,
+	.subscribe_event = msm_isp_subscribe_event,
+	.unsubscribe_event = msm_isp_unsubscribe_event,
+};
+
+static struct v4l2_subdev_ops msm_vfe32_subdev_ops = {
+	.core = &msm_vfe32_subdev_core_ops,
+};
+
+static struct v4l2_subdev_internal_ops msm_vfe32_internal_ops = {
+	.open = msm_isp_open_node,
+	.close = msm_isp_close_node,
 };
 
 struct msm_vfe_hardware_info vfe32_hw_info = {
@@ -1421,16 +1560,14 @@
 	.vfe_clk_idx = VFE32_CLK_IDX,
 	.vfe_ops = {
 		.irq_ops = {
-			.read_and_clear_irq_status =
-				msm_vfe32_read_and_clear_irq_status,
 			.read_irq_status = msm_vfe32_read_irq_status,
 			.process_camif_irq = msm_vfe32_process_camif_irq,
 			.process_reset_irq = msm_vfe32_process_reset_irq,
 			.process_halt_irq = msm_vfe32_process_halt_irq,
 			.process_reg_update = msm_vfe32_process_reg_update,
+			.process_epoch_irq = msm_vfe32_process_epoch_irq,
 			.process_axi_irq = msm_isp_process_axi_irq,
 			.process_stats_irq = msm_isp_process_stats_irq,
-			.process_epoch_irq = msm_vfe32_process_epoch_irq,
 		},
 		.axi_ops = {
 			.reload_wm = msm_vfe32_axi_reload_wm,
@@ -1446,15 +1583,14 @@
 			.clear_wm_reg = msm_vfe32_axi_clear_wm_reg,
 			.cfg_wm_xbar_reg = msm_vfe32_axi_cfg_wm_xbar_reg,
 			.clear_wm_xbar_reg = msm_vfe32_axi_clear_wm_xbar_reg,
-			.cfg_ub = msm_vfe47_cfg_axi_ub,
+			.cfg_ub = msm_vfe32_cfg_axi_ub,
 			.update_ping_pong_addr =
 				msm_vfe32_update_ping_pong_addr,
 			.get_comp_mask = msm_vfe32_get_comp_mask,
 			.get_wm_mask = msm_vfe32_get_wm_mask,
 			.get_pingpong_status = msm_vfe32_get_pingpong_status,
 			.halt = msm_vfe32_axi_halt,
-			.ub_reg_offset = msm_vfe40_ub_reg_offset,
-			.get_ub_size = msm_vfe40_get_ub_size,
+			.restart = msm_vfe32_axi_restart,
 		},
 		.core_ops = {
 			.reg_update = msm_vfe32_reg_update,
@@ -1469,13 +1605,13 @@
 			.release_hw = msm_vfe32_release_hardware,
 			.get_platform_data = msm_vfe32_get_platform_data,
 			.get_error_mask = msm_vfe32_get_error_mask,
-			.process_error_status = msm_vfe32_process_error_status,
 			.get_overflow_mask = msm_vfe32_get_overflow_mask,
-			.is_module_cfg_lock_needed =
-				msm_vfe32_is_module_cfg_lock_needed,
-			.ahb_clk_cfg = NULL,
-			.set_bus_err_ign_mask = NULL,
-			.get_bus_err_mask = NULL,
+			.get_rdi_wm_mask = msm_vfe32_get_rdi_wm_mask,
+			.get_irq_mask = msm_vfe32_get_irq_mask,
+			.restore_irq_mask = msm_vfe32_restore_irq_mask,
+			.get_halt_restart_mask =
+				msm_vfe32_get_halt_restart_mask,
+			.process_error_status = msm_vfe32_process_error_status,
 		},
 		.stats_ops = {
 			.get_stats_idx = msm_vfe32_get_stats_idx,
@@ -1493,47 +1629,12 @@
 			.get_wm_mask = msm_vfe32_stats_get_wm_mask,
 			.get_frame_id = msm_vfe32_stats_get_frame_id,
 			.get_pingpong_status = msm_vfe32_get_pingpong_status,
-			.enable_stats_wm = NULL,
 		},
 	},
 	.dmi_reg_offset = 0x5A0,
 	.axi_hw_info = &msm_vfe32_axi_hw_info,
 	.stats_hw_info = &msm_vfe32_stats_hw_info,
+	.subdev_ops = &msm_vfe32_subdev_ops,
+	.subdev_internal_ops = &msm_vfe32_internal_ops,
 };
 EXPORT_SYMBOL(vfe32_hw_info);
-
-static const struct of_device_id msm_vfe32_dt_match[] = {
-	{
-		.compatible = "qcom,vfe32",
-		.data = &vfe32_hw_info,
-	},
-	{}
-};
-
-MODULE_DEVICE_TABLE(of, msm_vfe32_dt_match);
-
-static struct platform_driver vfe32_driver = {
-	.probe = vfe_hw_probe,
-	.driver = {
-		.name = "msm_vfe32",
-		.owner = THIS_MODULE,
-		.of_match_table = msm_vfe32_dt_match,
-	},
-	.id_table = msm_vfe32_dev_id,
-};
-
-static int __init msm_vfe32_init_module(void)
-{
-	return platform_driver_register(&vfe32_driver);
-}
-
-static void __exit msm_vfe32_exit_module(void)
-{
-	platform_driver_unregister(&vfe32_driver);
-}
-
-module_init(msm_vfe32_init_module);
-module_exit(msm_vfe32_exit_module);
-MODULE_DESCRIPTION("MSM VFE32 driver");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 19b1aac..72ab3ba 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -889,8 +889,6 @@
 		msm_camera_io_w(temp | (framedrop_period - 1) << 2,
 		vfe_base + VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
 	}
-
-	msm_camera_io_w_mb(0x1, vfe_base + 0x378);
 }
 
 static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c
new file mode 100644
index 0000000..eada5fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/sched_clock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#include "msm_isp_32.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_sd.h"
+#include "msm_isp32.h"
+
+static struct msm_sd_req_vb2_q vfe_vb2_ops;
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+	{
+		.compatible = "qcom,vfe32",
+		.data = &vfe32_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+static const struct platform_device_id msm_vfe_dev_id[] = {
+	{"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
+	{}
+};
+#define MAX_OVERFLOW_COUNTERS  29
+#define OVERFLOW_LENGTH 1024
+#define OVERFLOW_BUFFER_LENGTH 64
+static char stat_line[OVERFLOW_LENGTH];
+
+static struct msm_isp_buf_mgr vfe_buf_mgr;
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+				  struct msm_isp_bw_req_info *isp_req_hist);
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+	"imgmaster0_overflow_cnt",
+	"imgmaster1_overflow_cnt",
+	"imgmaster2_overflow_cnt",
+	"imgmaster3_overflow_cnt",
+	"imgmaster4_overflow_cnt",
+	"imgmaster5_overflow_cnt",
+	"imgmaster6_overflow_cnt",
+	"be_overflow_cnt",
+	"bg_overflow_cnt",
+	"bf_overflow_cnt",
+	"awb_overflow_cnt",
+	"rs_overflow_cnt",
+	"cs_overflow_cnt",
+	"ihist_overflow_cnt",
+	"skinbhist_overflow_cnt",
+	"bfscale_overflow_cnt",
+	"ISP_VFE0_client_info.active",
+	"ISP_VFE0_client_info.ab",
+	"ISP_VFE0_client_info.ib",
+	"ISP_VFE1_client_info.active",
+	"ISP_VFE1_client_info.ab",
+	"ISP_VFE1_client_info.ib",
+	"ISP_CPP_client_info.active",
+	"ISP_CPP_client_info.ab",
+	"ISP_CPP_client_info.ib",
+	"ISP_last_overflow.ab",
+	"ISP_last_overflow.ib",
+	"ISP_VFE_CLK_RATE",
+	"ISP_CPP_CLK_RATE",
+};
+
+#define MAX_DEPTH_BW_REQ_HISTORY 25
+#define MAX_BW_HISTORY_BUFF_LEN  6144
+#define MAX_BW_HISTORY_LINE_BUFF_LEN 512
+
+#define MAX_UB_INFO_BUFF_LEN  1024
+#define MAX_UB_INFO_LINE_BUFF_LEN 256
+
+static struct msm_isp_bw_req_info
+		msm_isp_bw_request_history[MAX_DEPTH_BW_REQ_HISTORY];
+static int msm_isp_bw_request_history_idx;
+static char bw_request_history_buff[MAX_BW_HISTORY_BUFF_LEN];
+static char ub_info_buffer[MAX_UB_INFO_BUFF_LEN];
+static spinlock_t req_history_lock;
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file,
+	char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	uint64_t *ptr;
+	char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+	struct vfe_device *vfe_dev = (struct vfe_device *)
+		t_file->private_data;
+	struct msm_isp_statistics *stats = vfe_dev->stats;
+
+	memset(stat_line, 0, sizeof(stat_line));
+	msm_isp_util_get_bandwidth_stats(vfe_dev, stats);
+	ptr = (uint64_t *)(stats);
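+	/* struct msm_isp_statistics is a flat sequence of 64-bit counters, so
+	 * it is walked as an array; stats_str[] must stay in the same order
+	 * as the structure fields.
+	 */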
+	for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+		strlcat(stat_line, stats_str[i], sizeof(stat_line));
+		strlcat(stat_line, "     ", sizeof(stat_line));
+		snprintf(buffer, sizeof(buffer), "%llu", ptr[i]);
+		strlcat(stat_line, buffer, sizeof(stat_line));
+		strlcat(stat_line, "\r\n", sizeof(stat_line));
+	}
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, stat_line, strlen(stat_line));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct vfe_device *vfe_dev = (struct vfe_device *)
+		t_file->private_data;
+	struct msm_isp_statistics *stats = vfe_dev->stats;
+
+	memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+	return sizeof(struct msm_isp_statistics);
+}
+
+static int bw_history_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t bw_history_read(struct file *t_file, char __user *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = bw_request_history_buff;
+	char line_buffer[MAX_BW_HISTORY_LINE_BUFF_LEN] = {0};
+	struct msm_isp_bw_req_info *isp_req_hist =
+		(struct msm_isp_bw_req_info *) t_file->private_data;
+
+	memset(out_buffer, 0, MAX_BW_HISTORY_BUFF_LEN);
+
+	snprintf(line_buffer, sizeof(line_buffer),
+		"Bus bandwidth request history in chronological order:\n");
+	strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+	snprintf(line_buffer, sizeof(line_buffer),
+		"MSM_ISP_MIN_AB = %u, MSM_ISP_MIN_IB = %u\n\n",
+		MSM_ISP_MIN_AB, MSM_ISP_MIN_IB);
+	strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+	for (i = 0; i < MAX_DEPTH_BW_REQ_HISTORY; i++) {
+		snprintf(line_buffer, sizeof(line_buffer),
+		 "idx = %d, client = %u, timestamp = %llu, ab = %llu, ib = %llu\n"
+		 "ISP0.active = %x, ISP0.ab = %llu, ISP0.ib = %llu\n"
+		 "ISP1.active = %x, ISP1.ab = %llu, ISP1.ib = %llu\n"
+		 "CPP.active = %x, CPP.ab = %llu, CPP.ib = %llu\n\n",
+		 i, isp_req_hist[i].client, isp_req_hist[i].timestamp,
+		 isp_req_hist[i].total_ab, isp_req_hist[i].total_ib,
+		 isp_req_hist[i].client_info[0].active,
+		 isp_req_hist[i].client_info[0].ab,
+		 isp_req_hist[i].client_info[0].ib,
+		 isp_req_hist[i].client_info[1].active,
+		 isp_req_hist[i].client_info[1].ab,
+		 isp_req_hist[i].client_info[1].ib,
+		 isp_req_hist[i].client_info[2].active,
+		 isp_req_hist[i].client_info[2].ab,
+		 isp_req_hist[i].client_info[2].ib);
+		strlcat(out_buffer, line_buffer,
+		sizeof(bw_request_history_buff));
+	}
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t bw_history_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct msm_isp_bw_req_info *isp_req_hist =
+		(struct msm_isp_bw_req_info *) t_file->private_data;
+
+	memset(isp_req_hist, 0, sizeof(msm_isp_bw_request_history));
+	msm_isp_bw_request_history_idx = 0;
+	return sizeof(msm_isp_bw_request_history);
+}
+
+static int ub_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ub_info_read(struct file *t_file, char __user *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = ub_info_buffer;
+	char line_buffer[MAX_UB_INFO_LINE_BUFF_LEN] = {0};
+	struct vfe_device *vfe_dev =
+		(struct vfe_device *) t_file->private_data;
+	struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+	memset(out_buffer, 0, MAX_UB_INFO_BUFF_LEN);
+	snprintf(line_buffer, sizeof(line_buffer),
+		"wm_ub_policy_type = %d\n"
+		"num_wm = %d\n"
+		"wm_ub = %d\n",
+		ub_info->policy, ub_info->num_wm, ub_info->wm_ub);
+	strlcat(out_buffer, line_buffer,
+	    sizeof(ub_info_buffer));
+	for (i = 0; i < ub_info->num_wm; i++) {
+		snprintf(line_buffer, sizeof(line_buffer),
+			"data[%d] = 0x%x, addr[%d] = 0x%llx\n",
+			i, ub_info->data[i], i, ub_info->addr[i]);
+		strlcat(out_buffer, line_buffer,
+			sizeof(ub_info_buffer));
+	}
+
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t ub_info_write(struct file *t_file,
+	const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	struct vfe_device *vfe_dev =
+		(struct vfe_device *) t_file->private_data;
+	struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+	memset(ub_info, 0, sizeof(struct msm_isp_ub_info));
+
+	return sizeof(struct msm_isp_ub_info);
+}
+
+static const struct file_operations vfe_debugfs_error = {
+	.open = vfe_debugfs_statistics_open,
+	.read = vfe_debugfs_statistics_read,
+	.write = vfe_debugfs_statistics_write,
+};
+
+static const struct file_operations bw_history_ops = {
+	.open = bw_history_open,
+	.read = bw_history_read,
+	.write = bw_history_write,
+};
+
+static const struct file_operations ub_info_ops = {
+	.open = ub_info_open,
+	.read = ub_info_read,
+	.write = ub_info_write,
+};
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+				  struct msm_isp_bw_req_info *isp_req_hist)
+{
+	struct dentry *debugfs_base;
+	char dirname[32] = {0};
+
+	snprintf(dirname, sizeof(dirname), "msm_isp%d", vfe_dev->pdev->id);
+	debugfs_base = debugfs_create_dir(dirname, NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+	if (!debugfs_create_file("stats", 0644, debugfs_base,
+		vfe_dev, &vfe_debugfs_error))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("bw_req_history", 0644,
+		debugfs_base, isp_req_hist, &bw_history_ops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("ub_info", 0644,
+		debugfs_base, vfe_dev, &ub_info_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+				 uint64_t ib,
+				 struct msm_isp_bandwidth_info *client_info,
+				 unsigned long long ts)
+{
+	int i;
+
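+	/* Append this request to the fixed-size circular history under
+	 * req_history_lock.
+	 */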
+	spin_lock(&req_history_lock);
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].client =
+		client;
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].timestamp =
+		ts;
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ab =
+		ab;
+	msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ib =
+		ib;
+
+	for (i = 0; i < MAX_ISP_CLIENT; i++) {
+		msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+			client_info[i].active = client_info[i].active;
+		msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+			client_info[i].ab = client_info[i].ab;
+		msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+			client_info[i].ib = client_info[i].ib;
+	}
+
+	msm_isp_bw_request_history_idx = (msm_isp_bw_request_history_idx + 1)
+			 % MAX_DEPTH_BW_REQ_HISTORY;
+	spin_unlock(&req_history_lock);
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+	long rc;
+
+	if (is_compat_task()) {
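+		/* 32-bit userspace: dequeue the native event, then repack the
+		 * ISP payload into the msm_isp32_event_data32 layout.
+		 */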
+		struct msm_isp32_event_data32 *event_data32;
+		struct msm_isp32_event_data  *event_data;
+		struct v4l2_event isp_event;
+		struct v4l2_event *isp_event_user;
+
+		memset(&isp_event, 0, sizeof(isp_event));
+		rc = v4l2_event_dequeue(vfh, &isp_event,
+				file->f_flags & O_NONBLOCK);
+		if (rc)
+			return rc;
+		event_data = (struct msm_isp32_event_data *)
+				isp_event.u.data;
+		isp_event_user = (struct v4l2_event *)arg;
+		memcpy(isp_event_user, &isp_event,
+				sizeof(*isp_event_user));
+		event_data32 = (struct msm_isp32_event_data32 *)
+			isp_event_user->u.data;
+		memset(event_data32, 0,
+				sizeof(struct msm_isp32_event_data32));
+		event_data32->timestamp.tv_sec =
+				event_data->timestamp.tv_sec;
+		event_data32->timestamp.tv_usec =
+				event_data->timestamp.tv_usec;
+		event_data32->mono_timestamp.tv_sec =
+				event_data->mono_timestamp.tv_sec;
+		event_data32->mono_timestamp.tv_usec =
+				event_data->mono_timestamp.tv_usec;
+		event_data32->input_intf = event_data->input_intf;
+		event_data32->frame_id = event_data->frame_id;
+		memcpy(&(event_data32->u), &(event_data->u),
+					sizeof(event_data32->u));
+	} else {
+		rc = v4l2_event_dequeue(vfh, arg,
+				file->f_flags & O_NONBLOCK);
+	}
+	return rc;
+}
+#else
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+	return v4l2_event_dequeue(vfh, arg,
+			file->f_flags & O_NONBLOCK);
+}
+#endif
+
+static long msm_isp_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+	struct v4l2_fh *vfh = file->private_data;
+
+	switch (cmd) {
+	case VIDIOC_DQEVENT: {
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+			return -ENOIOCTLCMD;
+		return msm_isp_dqevent(file, vfh, arg);
+	}
+	break;
+	case VIDIOC_SUBSCRIBE_EVENT:
+		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+	case VIDIOC_UNSUBSCRIBE_EVENT:
+		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+	default:
+		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+	}
+}
+
+static long msm_isp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, msm_isp_subdev_do_ioctl);
+}
+
+static struct v4l2_file_operations msm_isp_v4l2_subdev_fops = {
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = msm_isp_subdev_fops_ioctl,
+#endif
+	.unlocked_ioctl = msm_isp_subdev_fops_ioctl
+};
+
+static int vfe_probe(struct platform_device *pdev)
+{
+	struct vfe_device *vfe_dev;
+	/*struct msm_cam_subdev_info sd_info;*/
+	const struct of_device_id *match_dev;
+	int rc = 0;
+
+	vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
+	if (!vfe_dev) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
+	if (!vfe_dev->stats) {
+		rc = -ENOMEM;
+		goto probe_fail1;
+	}
+
+	vfe_dev->ub_info = kzalloc(sizeof(struct msm_isp_ub_info), GFP_KERNEL);
+	if (!vfe_dev->ub_info) {
+		rc = -ENOMEM;
+		goto probe_fail2;
+	}
+	if (pdev->dev.of_node) {
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+		match_dev = of_match_device(msm_vfe_dt_match, &pdev->dev);
+		if (!match_dev) {
+			pr_err("%s: No vfe hardware info\n", __func__);
+			rc = -EINVAL;
+			goto probe_fail3;
+		}
+		vfe_dev->hw_info =
+			(struct msm_vfe_hardware_info *) match_dev->data;
+	} else {
+		vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
+			platform_get_device_id(pdev)->driver_data;
+	}
+
+	if (!vfe_dev->hw_info) {
+		pr_err("%s: No vfe hardware info\n", __func__);
+		rc = -EINVAL;
+		goto probe_fail3;
+	}
+	ISP_DBG("%s: device id = %d\n", __func__, pdev->id);
+
+	vfe_dev->pdev = pdev;
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.get_platform_data(vfe_dev);
+	if (rc < 0) {
+		pr_err("%s: failed to get platform resources\n", __func__);
+		rc = -ENOMEM;
+		goto probe_fail3;
+	}
+
+	INIT_LIST_HEAD(&vfe_dev->tasklet_q);
+	tasklet_init(&vfe_dev->vfe_tasklet,
+		msm_isp_do_tasklet, (unsigned long)vfe_dev);
+
+	v4l2_subdev_init(&vfe_dev->subdev.sd, vfe_dev->hw_info->subdev_ops);
+	vfe_dev->subdev.sd.internal_ops =
+		vfe_dev->hw_info->subdev_internal_ops;
+	snprintf(vfe_dev->subdev.sd.name,
+		ARRAY_SIZE(vfe_dev->subdev.sd.name),
+		"vfe");
+	vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+	v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev);
+	platform_set_drvdata(pdev, &vfe_dev->subdev.sd);
+	mutex_init(&vfe_dev->realtime_mutex);
+	mutex_init(&vfe_dev->core_mutex);
+	spin_lock_init(&vfe_dev->tasklet_lock);
+	spin_lock_init(&vfe_dev->shared_data_lock);
+	spin_lock_init(&req_history_lock);
+	media_entity_pads_init(&vfe_dev->subdev.sd.entity, 0, NULL);
+	vfe_dev->subdev.sd.entity.function = MSM_CAMERA_SUBDEV_VFE;
+	vfe_dev->subdev.sd.entity.name = pdev->name;
+	vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2;
+	rc = msm_sd_register(&vfe_dev->subdev);
+	if (rc != 0) {
+		pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+		goto probe_fail3;
+	}
+
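+	/* Start from the stock v4l2 subdev fops, but route ioctls through
+	 * msm_isp_subdev_do_ioctl for compat and event handling.
+	 */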
+	msm_isp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+	msm_isp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+	msm_isp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+	msm_isp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+
+	vfe_dev->subdev.sd.devnode->fops = &msm_isp_v4l2_subdev_fops;
+
+	vfe_dev->buf_mgr = &vfe_buf_mgr;
+	v4l2_subdev_notify(&vfe_dev->subdev.sd,
+		MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops);
+	rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr,
+		&vfe_vb2_ops, &pdev->dev,
+		vfe_dev->hw_info->axi_hw_info->scratch_buf_range);
+	if (rc < 0) {
+		pr_err("%s: Unable to create buffer manager\n", __func__);
+		rc = -EINVAL;
+		goto probe_fail3;
+	}
+	msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
+
+	vfe_dev->buf_mgr->init_done = 1;
+	vfe_dev->vfe_open_cnt = 0;
+	return rc;
+
+probe_fail3:
+	kfree(vfe_dev->ub_info);
+probe_fail2:
+	kfree(vfe_dev->stats);
+probe_fail1:
+	kfree(vfe_dev);
+end:
+	return rc;
+}
+
+static struct platform_driver vfe_driver = {
+	.probe = vfe_probe,
+	.driver = {
+		.name = "msm_vfe",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_vfe_dt_match,
+	},
+	.id_table = msm_vfe_dev_id,
+};
+
+static int __init msm_vfe_init_module(void)
+{
+	return platform_driver_register(&vfe_driver);
+}
+
+static void __exit msm_vfe_exit_module(void)
+{
+	platform_driver_unregister(&vfe_driver);
+}
+
+module_init(msm_vfe_init_module);
+module_exit(msm_vfe_exit_module);
+MODULE_DESCRIPTION("MSM VFE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h
new file mode 100644
index 0000000..cbe92fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h
@@ -0,0 +1,599 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE_H__
+#define __MSM_VFE_H__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+#include "msm_buf_mgr.h"
+
+#define VFE40_8974V1_VERSION 0x10000018
+#define VFE40_8974V2_VERSION 0x1001001A
+#define VFE40_8974V3_VERSION 0x1001001B
+#define VFE40_8x26_VERSION 0x20000013
+#define VFE40_8x26V2_VERSION 0x20010014
+#define VFE40_8916_VERSION 0x10030000
+#define VFE40_8939_VERSION 0x10040000
+#define VFE32_8909_VERSION 0x30600
+
+#define MAX_IOMMU_CTX 2
+#define MAX_NUM_WM 7
+#define MAX_NUM_RDI 3
+#define MAX_NUM_RDI_MASTER 3
+#define MAX_NUM_COMPOSITE_MASK 4
+#define MAX_NUM_STATS_COMP_MASK 2
+#define MAX_INIT_FRAME_DROP 31
+#define ISP_Q2 (1 << 2)
+#define ISP_Q10 (1 << 10)
+
+#define VFE_PING_FLAG 0xFFFFFFFF
+#define VFE_PONG_FLAG 0x0
+
+#define VFE_MAX_CFG_TIMEOUT 3000
+#define VFE_CLK_INFO_MAX 16
+#define STATS_COMP_BIT_MASK 0xFF0000
+
+#define MSM_ISP_MIN_AB 11000000
+#define MSM_ISP_MIN_IB 11000000
+
+struct vfe_device;
+struct msm_vfe_axi_stream;
+struct msm_vfe_stats_stream;
+
+struct vfe_subscribe_info {
+	struct v4l2_fh *vfh;
+	uint32_t active;
+};
+
+enum msm_isp_pack_fmt {
+	QCOM,
+	MIPI,
+	DPCM6,
+	DPCM8,
+	PLAIN8,
+	PLAIN16,
+	MAX_ISP_PACK_FMT,
+};
+
+enum msm_isp_camif_update_state {
+	NO_UPDATE,
+	ENABLE_CAMIF,
+	DISABLE_CAMIF,
+	DISABLE_CAMIF_IMMEDIATELY
+};
+
+struct msm_isp_timestamp {
+	/*Monotonic clock for v4l2 buffer*/
+	struct timeval buf_time;
+	/*Monotonic clock for VT */
+	struct timeval vt_time;
+	/*Wall clock for userspace event*/
+	struct timeval event_time;
+};
+
+struct msm_vfe_irq_ops {
+	void (*read_irq_status)(struct vfe_device *vfe_dev,
+		uint32_t *irq_status0, uint32_t *irq_status1);
+	void (*process_reg_update)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_epoch_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_reset_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1);
+	void (*process_halt_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1);
+	void (*process_camif_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_axi_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+	void (*process_stats_irq)(struct vfe_device *vfe_dev,
+		uint32_t irq_status0, uint32_t irq_status1,
+		struct msm_isp_timestamp *ts);
+};
+
+struct msm_vfe_axi_ops {
+	void (*reload_wm)(struct vfe_device *vfe_dev,
+		uint32_t reload_mask);
+	void (*enable_wm)(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint8_t enable);
+	int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
+		enum msm_vfe_axi_stream_src stream_src,
+		uint32_t io_format);
+	void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*clear_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*clear_comp_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+	void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info);
+
+	void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info,
+		uint8_t plane_idx);
+	void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+	void (*cfg_wm_xbar_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info,
+		uint8_t plane_idx);
+	void (*clear_wm_xbar_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+	void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+	void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint32_t pingpong_status, dma_addr_t paddr);
+
+	uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+	int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
+	int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+		uint32_t enable_camif);
+	void (*update_cgc_override)(struct vfe_device *vfe_dev,
+		uint8_t wm_idx, uint8_t cgc_override);
+};
+
+struct msm_vfe_core_ops {
+	void (*reg_update)(struct vfe_device *vfe_dev, uint32_t input_src);
+	long (*reset_hw)(struct vfe_device *vfe_dev, uint32_t first_start,
+		uint32_t blocking_call);
+	int (*init_hw)(struct vfe_device *vfe_dev);
+	void (*init_hw_reg)(struct vfe_device *vfe_dev);
+	void (*clear_status_reg)(struct vfe_device *vfe_dev);
+	void (*release_hw)(struct vfe_device *vfe_dev);
+	void (*cfg_input_mux)(struct vfe_device *vfe_dev,
+		struct msm_vfe_pix_cfg *pix_cfg);
+	int (*start_fetch_eng)(struct vfe_device *vfe_dev,
+		void *arg);
+	void (*update_camif_state)(struct vfe_device *vfe_dev,
+		enum msm_isp_camif_update_state update_state);
+	void (*cfg_rdi_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_rdi_cfg *rdi_cfg,
+		enum msm_vfe_input_src input_src);
+	int (*get_platform_data)(struct vfe_device *vfe_dev);
+	void (*get_error_mask)(uint32_t *error_mask0, uint32_t *error_mask1);
+	void (*process_error_status)(struct vfe_device *vfe_dev);
+	void (*get_overflow_mask)(uint32_t *overflow_mask);
+	void (*get_irq_mask)(struct vfe_device *vfe_dev,
+		uint32_t *irq0_mask, uint32_t *irq1_mask);
+	void (*restore_irq_mask)(struct vfe_device *vfe_dev);
+	void (*get_halt_restart_mask)(uint32_t *irq0_mask,
+		uint32_t *irq1_mask);
+	void (*get_rdi_wm_mask)(struct vfe_device *vfe_dev,
+		uint32_t *rdi_wm_mask);
+};
+struct msm_vfe_stats_ops {
+	int (*get_stats_idx)(enum msm_isp_stats_type stats_type);
+	int (*check_streams)(struct msm_vfe_stats_stream *stream_info);
+	void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*clear_framedrop)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
+	void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+
+	void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+	void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info);
+
+	void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+	void (*enable_module)(struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
+
+	void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
+		struct msm_vfe_stats_stream *stream_info,
+		uint32_t pingpong_status, dma_addr_t paddr);
+
+	uint32_t (*get_frame_id)(struct vfe_device *vfe_dev);
+	uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+	uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+
+	void (*update_cgc_override)(struct vfe_device *vfe_dev,
+		uint32_t stats_mask, uint8_t enable);
+};
+
+struct msm_vfe_ops {
+	struct msm_vfe_irq_ops irq_ops;
+	struct msm_vfe_axi_ops axi_ops;
+	struct msm_vfe_core_ops core_ops;
+	struct msm_vfe_stats_ops stats_ops;
+};
+
+struct msm_vfe_hardware_info {
+	int num_iommu_ctx;
+	/* secure iommu ctx nums */
+	int num_iommu_secure_ctx;
+	int vfe_clk_idx;
+	struct msm_vfe_ops vfe_ops;
+	struct msm_vfe_axi_hardware_info *axi_hw_info;
+	struct msm_vfe_stats_hardware_info *stats_hw_info;
+	struct v4l2_subdev_internal_ops *subdev_internal_ops;
+	struct v4l2_subdev_ops *subdev_ops;
+	uint32_t dmi_reg_offset;
+};
+
+struct msm_vfe_axi_hardware_info {
+	uint8_t num_wm;
+	uint8_t num_rdi;
+	uint8_t num_rdi_master;
+	uint8_t num_comp_mask;
+	uint32_t min_wm_ub;
+	uint32_t scratch_buf_range;
+};
+
+enum msm_vfe_axi_state {
+	AVAILABLE,
+	INACTIVE,
+	ACTIVE,
+	PAUSED,
+	START_PENDING,
+	STOP_PENDING,
+	PAUSE_PENDING,
+	RESUME_PENDING,
+	STARTING,
+	STOPPING,
+	PAUSING,
+	RESUMING,
+};
+
+enum msm_vfe_axi_cfg_update_state {
+	NO_AXI_CFG_UPDATE,
+	APPLYING_UPDATE_RESUME,
+	UPDATE_REQUESTED,
+};
+
+#define VFE_NO_DROP	       0xFFFFFFFF
+#define VFE_DROP_EVERY_2FRAME  0x55555555
+#define VFE_DROP_EVERY_4FRAME  0x11111111
+#define VFE_DROP_EVERY_8FRAME  0x01010101
+#define VFE_DROP_EVERY_16FRAME 0x00010001
+#define VFE_DROP_EVERY_32FRAME 0x00000001
+
+enum msm_vfe_axi_stream_type {
+	CONTINUOUS_STREAM,
+	BURST_STREAM,
+};
+
+struct msm_vfe_axi_stream {
+	uint32_t frame_id;
+	enum msm_vfe_axi_state state;
+	enum msm_vfe_axi_stream_src stream_src;
+	uint8_t num_planes;
+	uint8_t wm[MAX_PLANES_PER_STREAM];
+	uint32_t output_format;/*Planar/RAW/Misc*/
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+	uint8_t comp_mask_index;
+	struct msm_isp_buffer *buf[2];
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t bufq_handle;
+	uint32_t bufq_scratch_handle;
+	uint32_t controllable_output;
+	uint32_t stream_handle;
+	uint32_t request_frm_num;
+	uint8_t buf_divert;
+	enum msm_vfe_axi_stream_type stream_type;
+	uint32_t frame_based;
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+	uint32_t framedrop_period;
+	uint32_t framedrop_pattern;
+	uint32_t num_burst_capture;/*number of frames to capture*/
+	uint32_t init_frame_drop;
+	uint32_t burst_frame_count;/*number of sof before burst stop*/
+	uint8_t framedrop_update;
+	spinlock_t lock;
+
+	/*Bandwidth calculation info*/
+	uint32_t max_width;
+	/*Based on format plane size in Q2. e.g NV12 = 1.5*/
+	uint32_t format_factor;
+	uint32_t bandwidth;
+
+	/*Run time update variables*/
+	uint32_t runtime_init_frame_drop;
+	uint32_t runtime_burst_frame_count;/*number of sof before burst stop*/
+	uint32_t runtime_num_burst_capture;
+	uint8_t  runtime_framedrop_update;
+	uint8_t  runtime_framedrop_update_burst;
+	uint32_t runtime_output_format;
+	enum msm_stream_memory_input_t  memory_input;
+};
+
+struct msm_vfe_axi_composite_info {
+	uint32_t stream_handle;
+	uint32_t stream_composite_mask;
+};
+
+struct msm_vfe_src_info {
+	uint32_t frame_id;
+	uint8_t active;
+	uint8_t pix_stream_count;
+	uint8_t raw_stream_count;
+	enum msm_vfe_inputmux input_mux;
+	uint32_t width;
+	long pixel_clock;
+	uint32_t input_format;/*V4L2 pix format with bayer pattern*/
+	uint32_t last_updt_frm_id;
+};
+
+struct msm_vfe_fetch_engine_info {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t bufq_handle;
+	uint32_t buf_idx;
+	uint8_t is_busy;
+};
+
+enum msm_wm_ub_cfg_type {
+	MSM_WM_UB_CFG_DEFAULT,
+	MSM_WM_UB_EQUAL_SLICING,
+	MSM_WM_UB_CFG_MAX_NUM
+};
+
+struct msm_vfe_axi_shared_data {
+	struct msm_vfe_axi_hardware_info *hw_info;
+	struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
+	uint32_t free_wm[MAX_NUM_WM];
+	uint32_t wm_image_size[MAX_NUM_WM];
+	enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
+	uint8_t num_used_wm;
+	uint8_t num_active_stream;
+	uint8_t num_rdi_stream;
+	uint8_t num_pix_stream;
+	uint32_t rdi_wm_mask;
+	struct msm_vfe_axi_composite_info
+	composite_info[MAX_NUM_COMPOSITE_MASK];
+	uint8_t num_used_composite_mask;
+	uint32_t stream_update;
+	atomic_t axi_cfg_update;
+	enum msm_isp_camif_update_state pipeline_update;
+	struct msm_vfe_src_info src_info[VFE_SRC_MAX];
+	uint16_t stream_handle_cnt;
+	uint32_t event_mask;
+};
+
+struct msm_vfe_stats_hardware_info {
+	uint32_t stats_capability_mask;
+	uint8_t *stats_ping_pong_offset;
+	uint8_t num_stats_type;
+	uint8_t num_stats_comp_mask;
+};
+
+enum msm_vfe_stats_state {
+	STATS_AVAILABLE,
+	STATS_INACTIVE,
+	STATS_ACTIVE,
+	STATS_START_PENDING,
+	STATS_STOP_PENDING,
+	STATS_STARTING,
+	STATS_STOPPING,
+};
+
+struct msm_vfe_stats_stream {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t stream_handle;
+	uint32_t composite_flag;
+	enum msm_isp_stats_type stats_type;
+	enum msm_vfe_stats_state state;
+	uint32_t framedrop_pattern;
+	uint32_t framedrop_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t init_stats_frame_drop;
+
+	uint32_t buffer_offset;
+	struct msm_isp_buffer *buf[2];
+	uint32_t bufq_handle;
+};
+
+struct msm_vfe_stats_shared_data {
+	struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
+	uint8_t num_active_stream;
+	atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
+	uint32_t reg_mask;
+	uint16_t stream_handle_cnt;
+	atomic_t stats_update;
+};
+
+struct msm_vfe_tasklet_queue_cmd {
+	struct list_head list;
+	uint32_t vfeInterruptStatus0;
+	uint32_t vfeInterruptStatus1;
+	struct msm_isp_timestamp ts;
+	uint8_t cmd_used;
+};
+
+#define MSM_VFE_TASKLETQ_SIZE 200
+
+enum msm_vfe_overflow_state {
+	NO_OVERFLOW,
+	OVERFLOW_DETECTED,
+	HALT_REQUESTED,
+	RESTART_REQUESTED,
+};
+
+struct msm_vfe_error_info {
+	atomic_t overflow_state;
+	uint32_t overflow_recover_irq_mask0;
+	uint32_t overflow_recover_irq_mask1;
+	uint32_t error_mask0;
+	uint32_t error_mask1;
+	uint32_t violation_status;
+	uint32_t camif_status;
+	uint32_t stream_framedrop_count[MAX_NUM_STREAM];
+	uint32_t stats_framedrop_count[MSM_ISP_STATS_MAX];
+	uint32_t info_dump_frame_count;
+	uint32_t error_count;
+};
+
+struct msm_isp_statistics {
+	int64_t imagemaster0_overflow;
+	int64_t imagemaster1_overflow;
+	int64_t imagemaster2_overflow;
+	int64_t imagemaster3_overflow;
+	int64_t imagemaster4_overflow;
+	int64_t imagemaster5_overflow;
+	int64_t imagemaster6_overflow;
+	int64_t be_overflow;
+	int64_t bg_overflow;
+	int64_t bf_overflow;
+	int64_t awb_overflow;
+	int64_t rs_overflow;
+	int64_t cs_overflow;
+	int64_t ihist_overflow;
+	int64_t skinbhist_overflow;
+	int64_t bfscale_overflow;
+
+	int64_t isp_vfe0_active;
+	int64_t isp_vfe0_ab;
+	int64_t isp_vfe0_ib;
+
+	int64_t isp_vfe1_active;
+	int64_t isp_vfe1_ab;
+	int64_t isp_vfe1_ib;
+
+	int64_t isp_cpp_active;
+	int64_t isp_cpp_ab;
+	int64_t isp_cpp_ib;
+
+	int64_t last_overflow_ab;
+	int64_t last_overflow_ib;
+
+	int64_t vfe_clk_rate;
+	int64_t cpp_clk_rate;
+};
+
+enum msm_isp_hw_client {
+	ISP_VFE0,
+	ISP_VFE1,
+	ISP_CPP,
+	MAX_ISP_CLIENT,
+};
+
+struct msm_isp_bandwidth_info {
+	uint32_t active;
+	uint64_t ab;
+	uint64_t ib;
+};
+
+struct msm_isp_bw_req_info {
+	uint32_t client;
+	unsigned long long timestamp;
+	uint64_t total_ab;
+	uint64_t total_ib;
+	struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+#define MSM_ISP_MAX_WM 7
+struct msm_isp_ub_info {
+	enum msm_wm_ub_cfg_type policy;
+	uint8_t num_wm;
+	uint32_t wm_ub;
+	uint32_t data[MSM_ISP_MAX_WM];
+	uint64_t addr[MSM_ISP_MAX_WM];
+};
+
+struct msm_vfe_hw_init_parms {
+	const char *entries;
+	const char *regs;
+	const char *settings;
+};
+
+struct vfe_device {
+	struct platform_device *pdev;
+	struct msm_sd_subdev subdev;
+	struct resource *vfe_irq;
+	struct resource *vfe_mem;
+	struct resource *vfe_vbif_mem;
+	struct resource *vfe_io;
+	struct resource *vfe_vbif_io;
+	void __iomem *vfe_base;
+	void __iomem *vfe_vbif_base;
+
+	struct device *iommu_ctx[MAX_IOMMU_CTX];
+	/*Add secure context banks*/
+	struct device *iommu_secure_ctx[MAX_IOMMU_CTX];
+
+	struct regulator *fs_vfe;
+	struct clk **vfe_clk;
+	uint32_t num_clk;
+
+	uint32_t bus_perf_client;
+
+	struct completion reset_complete;
+	struct completion halt_complete;
+	struct completion stream_config_complete;
+	struct completion stats_config_complete;
+	struct mutex realtime_mutex;
+	struct mutex core_mutex;
+
+	atomic_t irq_cnt;
+	uint8_t taskletq_idx;
+	spinlock_t  tasklet_lock;
+	spinlock_t  shared_data_lock;
+	struct list_head tasklet_q;
+	struct tasklet_struct vfe_tasklet;
+	struct msm_vfe_tasklet_queue_cmd
+		tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
+
+	uint32_t vfe_hw_version;
+	struct msm_vfe_hardware_info *hw_info;
+	struct msm_vfe_axi_shared_data axi_data;
+	struct msm_vfe_stats_shared_data stats_data;
+	struct msm_vfe_error_info error_info;
+	struct msm_isp_buf_mgr *buf_mgr;
+	int dump_reg;
+	int vfe_clk_idx;
+	uint32_t vfe_open_cnt;
+	uint8_t vt_enable;
+	uint8_t ignore_error;
+	struct msm_isp_statistics *stats;
+	struct msm_vfe_fetch_engine_info fetch_engine_info;
+	uint64_t msm_isp_last_overflow_ab;
+	uint64_t msm_isp_last_overflow_ib;
+	uint64_t msm_isp_vfe_clk_rate;
+	struct msm_isp_ub_info *ub_info;
+	uint32_t vfe_ub_policy;
+	uint32_t isp_sof_debug;
+	uint8_t reset_pending;
+	uint32_t bus_util_factor;
+	uint8_t vfe_reset_timeout_processed;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 0b84327..f50e257 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -3847,7 +3847,7 @@
 
 int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
 {
-	int rc = 0, i;
+	int rc = 0, i, j, k;
 	struct msm_vfe_axi_stream *stream_info;
 	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
 	struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
@@ -4126,8 +4126,16 @@
 			}
 			vfe_idx = msm_isp_get_vfe_idx_for_stream(
 				vfe_dev, stream_info);
-			msm_isp_stream_axi_cfg_update(vfe_dev, stream_info,
-				update_info);
+			for (j = 0; j < stream_info->num_planes; j++) {
+				stream_info->plane_cfg[vfe_idx][j] =
+					update_info->plane_cfg[j];
+				for (k = 0; k < stream_info->num_isp; k++) {
+					vfe_dev = stream_info->vfe_dev[k];
+					vfe_dev->hw_info->vfe_ops.axi_ops.
+						cfg_wm_reg(vfe_dev,
+						stream_info, j);
+				}
+			}
 		}
 		break;
 	}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c
new file mode 100644
index 0000000..55a10ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c
@@ -0,0 +1,2095 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <asm/div64.h>
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+
+#define SRC_TO_INTF(src) \
+	((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
+	(VFE_RAW_0 + src - RDI_INTF_0))
+
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
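+
+/*
+ * Note: stream handles are built as ((++stream_handle_cnt) << 8) | src_idx,
+ * so HANDLE_TO_IDX() recovers the stream_info[] index from the low byte,
+ * while SRC_TO_INTF() maps RDI stream sources onto the VFE_RAW_*
+ * input interfaces and every PIX source onto VFE_PIX_0.
+ */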
+
+int msm_isp_axi_create_stream(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	uint32_t i = stream_cfg_cmd->stream_src;
+
+	if (i >= VFE_AXI_SRC_MAX) {
+		pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+			stream_cfg_cmd->stream_src);
+		return -EINVAL;
+	}
+
+	if ((axi_data->stream_handle_cnt << 8) == 0)
+		axi_data->stream_handle_cnt++;
+
+	stream_cfg_cmd->axi_stream_handle =
+		(++axi_data->stream_handle_cnt) << 8 | i;
+
+	memset(&axi_data->stream_info[i], 0,
+		   sizeof(struct msm_vfe_axi_stream));
+	spin_lock_init(&axi_data->stream_info[i].lock);
+	axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
+	axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
+	axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
+	axi_data->stream_info[i].state = INACTIVE;
+	axi_data->stream_info[i].stream_handle =
+		stream_cfg_cmd->axi_stream_handle;
+	axi_data->stream_info[i].controllable_output =
+		stream_cfg_cmd->controllable_output;
+	if (stream_cfg_cmd->controllable_output)
+		stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+	return 0;
+}
+
+void msm_isp_axi_destroy_stream(
+	struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+{
+	if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
+		axi_data->stream_info[stream_idx].state = AVAILABLE;
+		axi_data->stream_info[stream_idx].stream_handle = 0;
+	} else {
+		pr_err("%s: stream does not exist\n", __func__);
+	}
+}
+
+int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	int rc = -1, i;
+	struct msm_vfe_axi_stream *stream_info = NULL;
+
+	if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM) {
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+	} else {
+		pr_err("%s: Invalid axi_stream_handle\n", __func__);
+		return rc;
+	}
+
+	if (!stream_info) {
+		pr_err("%s: Stream info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (stream_cfg_cmd->output_format) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		stream_info->num_planes = 1;
+		stream_info->format_factor = ISP_Q2;
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		stream_info->num_planes = 2;
+		stream_info->format_factor = 1.5 * ISP_Q2;
+		break;
+	/* TODO: Add more image formats */
+	default:
+		msm_isp_print_fourcc_error(__func__,
+				stream_cfg_cmd->output_format);
+		return rc;
+	}
+
+	if (axi_data->hw_info->num_wm - axi_data->num_used_wm <
+		stream_info->num_planes) {
+		pr_err("%s: No free write masters\n", __func__);
+		return rc;
+	}
+
+	if ((stream_info->num_planes > 1) &&
+			(axi_data->hw_info->num_comp_mask -
+			axi_data->num_used_composite_mask < 1)) {
+		pr_err("%s: No free composite mask\n", __func__);
+		return rc;
+	}
+
+	if (stream_cfg_cmd->init_frame_drop >= MAX_INIT_FRAME_DROP) {
+		pr_err("%s: Invalid skip pattern\n", __func__);
+		return rc;
+	}
+
+	if (stream_cfg_cmd->frame_skip_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid skip pattern\n", __func__);
+		return rc;
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
+		stream_info->max_width = max(stream_info->max_width,
+			stream_cfg_cmd->plane_cfg[i].output_width);
+	}
+
+	stream_info->output_format = stream_cfg_cmd->output_format;
+	stream_info->runtime_output_format = stream_info->output_format;
+	stream_info->stream_src = stream_cfg_cmd->stream_src;
+	stream_info->frame_based = stream_cfg_cmd->frame_base;
+	return 0;
+}
+
+static uint32_t msm_isp_axi_get_plane_size(
+	struct msm_vfe_axi_stream *stream_info, int plane_idx)
+{
+	uint32_t size = 0;
+	struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+
+	switch (stream_info->output_format) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+		/* TODO: fix me */
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		/* TODO: fix me */
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		size = plane_cfg[plane_idx].output_height *
+		plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		else
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+		if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		else
+			size = plane_cfg[plane_idx].output_height *
+				plane_cfg[plane_idx].output_width;
+		break;
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		size = plane_cfg[plane_idx].output_height *
+			plane_cfg[plane_idx].output_width;
+		break;
+	/* TODO: Add more image formats */
+	default:
+		msm_isp_print_fourcc_error(__func__,
+				stream_info->output_format);
+		break;
+	}
+	return size;
+}
+
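+/*
+ * Reserve one free hardware write master per plane of the stream and
+ * record the reserved WM indices in stream_info->wm[]; the per-WM image
+ * size is cached for later UB configuration.
+ */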
+void msm_isp_axi_reserve_wm(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i, j;
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		for (j = 0; j < axi_data->hw_info->num_wm; j++) {
+			if (!axi_data->free_wm[j]) {
+				axi_data->free_wm[j] =
+					stream_info->stream_handle;
+				axi_data->wm_image_size[j] =
+					msm_isp_axi_get_plane_size(
+						stream_info, i);
+				axi_data->num_used_wm++;
+				break;
+			}
+		}
+		stream_info->wm[i] = j;
+	}
+}
+
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		axi_data->free_wm[stream_info->wm[i]] = 0;
+		axi_data->num_used_wm--;
+	}
+	if (stream_info->stream_src <= IDEAL_RAW)
+		axi_data->num_pix_stream++;
+	else if (stream_info->stream_src < VFE_AXI_SRC_MAX)
+		axi_data->num_rdi_stream++;
+}
+
+void msm_isp_axi_reserve_comp_mask(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+	uint8_t comp_mask = 0;
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		comp_mask |= 1 << stream_info->wm[i];
+
+	for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+		if (!axi_data->composite_info[i].stream_handle) {
+			axi_data->composite_info[i].stream_handle =
+				stream_info->stream_handle;
+			axi_data->composite_info[i].
+				stream_composite_mask = comp_mask;
+			axi_data->num_used_composite_mask++;
+			break;
+		}
+	}
+	stream_info->comp_mask_index = i;
+}
+
+static void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	axi_data->composite_info[stream_info->comp_mask_index].
+		stream_composite_mask = 0;
+	axi_data->composite_info[stream_info->comp_mask_index].
+		stream_handle = 0;
+	axi_data->num_used_composite_mask--;
+}
+
+static int msm_isp_axi_get_bufq_handles(
+		struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info)
+{
+	int rc = 0;
+
+	if (stream_info->stream_id & ISP_SCRATCH_BUF_BIT) {
+		stream_info->bufq_handle =
+			vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id & ~ISP_SCRATCH_BUF_BIT);
+		if (stream_info->bufq_handle == 0) {
+			pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+				__func__, (unsigned int)stream_info->stream_id);
+			rc = -EINVAL;
+			return rc;
+		}
+
+		stream_info->bufq_scratch_handle =
+			vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+		if (stream_info->bufq_scratch_handle == 0) {
+			pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+				__func__, (unsigned int)stream_info->stream_id);
+			rc = -EINVAL;
+			return rc;
+		}
+	} else {
+		stream_info->bufq_handle =
+			vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+		if (stream_info->bufq_handle == 0) {
+			pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+				__func__, (unsigned int)stream_info->stream_id);
+			rc = -EINVAL;
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int msm_isp_axi_check_stream_state(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int rc = 0, i;
+	unsigned long flags;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+	enum msm_vfe_axi_state valid_state =
+		(stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		spin_lock_irqsave(&stream_info->lock, flags);
+		if (stream_info->state != valid_state) {
+			if ((stream_info->state == PAUSING ||
+				stream_info->state == PAUSED ||
+				stream_info->state == RESUME_PENDING ||
+				stream_info->state == RESUMING) &&
+				(stream_cfg_cmd->cmd == STOP_STREAM ||
+				stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
+				stream_info->state = ACTIVE;
+			} else {
+				pr_err("%s: Invalid stream state: %d\n",
+					__func__, stream_info->state);
+				spin_unlock_irqrestore(
+					&stream_info->lock, flags);
+				rc = -EINVAL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&stream_info->lock, flags);
+
+		if (stream_cfg_cmd->cmd == START_STREAM) {
+			rc = msm_isp_axi_get_bufq_handles(vfe_dev, stream_info);
+			if (rc)
+				break;
+		}
+	}
+	return rc;
+}
+
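+/*
+ * Called on SOF: counts down runtime_init_frame_drop and reprograms the
+ * framedrop register once the initial drop expires; for burst streams on
+ * the signalled input it either applies a newly updated burst budget or
+ * counts down the remaining burst frames and reconfigures when exhausted.
+ */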
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+	uint8_t input_src)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state != ACTIVE)
+			continue;
+
+		if (stream_info->runtime_framedrop_update) {
+			stream_info->runtime_init_frame_drop--;
+			if (stream_info->runtime_init_frame_drop == 0) {
+				stream_info->runtime_framedrop_update = 0;
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+				cfg_framedrop(vfe_dev, stream_info);
+			}
+		}
+		if (stream_info->stream_type == BURST_STREAM &&
+			((1 << SRC_TO_INTF(stream_info->stream_src)) &
+			input_src)) {
+			if (stream_info->runtime_framedrop_update_burst) {
+				stream_info->runtime_framedrop_update_burst = 0;
+				stream_info->runtime_burst_frame_count =
+				    stream_info->runtime_init_frame_drop +
+				    (stream_info->runtime_num_burst_capture -
+					1) *
+				    (stream_info->framedrop_period + 1) + 1;
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_framedrop(vfe_dev, stream_info);
+			} else {
+				stream_info->runtime_burst_frame_count--;
+				if (stream_info->
+				    runtime_burst_frame_count == 0) {
+					vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_framedrop(vfe_dev, stream_info);
+				}
+			}
+		}
+	}
+}
+
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream *stream_info)
+{
+	stream_info->runtime_init_frame_drop = stream_info->init_frame_drop;
+	stream_info->runtime_burst_frame_count =
+		stream_info->burst_frame_count;
+	stream_info->runtime_num_burst_capture =
+		stream_info->num_burst_capture;
+	stream_info->runtime_framedrop_update = stream_info->framedrop_update;
+	vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(vfe_dev, stream_info);
+}
+
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+	enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+	struct msm_isp32_event_data event_data;
+
+	memset(&event_data, 0, sizeof(event_data));
+	switch (event_type) {
+	case ISP_EVENT_SOF:
+		if ((frame_src == VFE_PIX_0) && (vfe_dev->isp_sof_debug < 5)) {
+			pr_err("%s: PIX0 frame id: %u\n", __func__,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+			vfe_dev->isp_sof_debug++;
+		}
+		vfe_dev->axi_data.src_info[frame_src].frame_id++;
+		if (vfe_dev->axi_data.src_info[frame_src].frame_id == 0)
+			vfe_dev->axi_data.src_info[frame_src].frame_id = 1;
+		ISP_DBG("%s: frame_src %d frame id: %u\n", __func__,
+			frame_src,
+			vfe_dev->axi_data.src_info[frame_src].frame_id);
+		break;
+	case ISP_EVENT_REG_UPDATE:
+		vfe_dev->axi_data.src_info[frame_src].last_updt_frm_id = 0;
+		break;
+	default:
+		break;
+	}
+
+	event_data.input_intf = frame_src;
+	event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
+	event_data.timestamp = ts->event_time;
+	event_data.mono_timestamp = ts->buf_time;
+	msm_isp_send_event(vfe_dev, event_type | frame_src, &event_data);
+}
+
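+/*
+ * Derive the framedrop pattern/period from the requested skip pattern and
+ * the burst frame budget from the burst count, e.g. with init_frame_drop
+ * of 4, burst_count of 3 and a framedrop period of 2 frames, the burst
+ * needs 4 + (3 - 1) * 2 + 1 = 9 frames to complete.
+ */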
+void msm_isp_calculate_framedrop(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	uint32_t framedrop_period = 0;
+	struct msm_vfe_axi_stream *stream_info = NULL;
+
+	if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM) {
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+	} else {
+		pr_err("%s: Invalid stream handle", __func__);
+		return;
+	}
+	if (!stream_info) {
+		pr_err("%s: Stream info is NULL\n", __func__);
+		return;
+	}
+
+	framedrop_period = msm_isp_get_framedrop_period(
+	   stream_cfg_cmd->frame_skip_pattern);
+	stream_info->frame_skip_pattern =
+		stream_cfg_cmd->frame_skip_pattern;
+	if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
+		stream_info->framedrop_pattern = 0x0;
+	else
+		stream_info->framedrop_pattern = 0x1;
+	stream_info->framedrop_period = framedrop_period - 1;
+
+	if (stream_cfg_cmd->init_frame_drop < framedrop_period) {
+		stream_info->framedrop_pattern <<=
+			stream_cfg_cmd->init_frame_drop;
+		stream_info->init_frame_drop = 0;
+		stream_info->framedrop_update = 0;
+	} else {
+		stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+		stream_info->framedrop_update = 1;
+	}
+
+	if (stream_cfg_cmd->burst_count > 0) {
+		stream_info->stream_type = BURST_STREAM;
+		stream_info->num_burst_capture =
+			stream_cfg_cmd->burst_count;
+		stream_info->burst_frame_count =
+		stream_cfg_cmd->init_frame_drop +
+			(stream_cfg_cmd->burst_count - 1) *
+			framedrop_period + 1;
+	} else {
+		stream_info->stream_type = CONTINUOUS_STREAM;
+		stream_info->burst_frame_count = 0;
+		stream_info->num_burst_capture = 0;
+	}
+}
+
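+/*
+ * For PIX streams the bandwidth is scaled from the CAMIF pixel clock by
+ * the output width and the Q2 format factor; for RDI streams it is
+ * derived from the interface pixel clock and the bits per pixel of the
+ * output format.
+ */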
+static void msm_isp_calculate_bandwidth(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int bpp = 0;
+
+	if (stream_info->stream_src < RDI_INTF_0) {
+		stream_info->bandwidth =
+			(axi_data->src_info[VFE_PIX_0].pixel_clock /
+			axi_data->src_info[VFE_PIX_0].width) *
+			stream_info->max_width;
+		stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
+			stream_info->format_factor / ISP_Q2;
+	} else {
+		int rdi = SRC_TO_INTF(stream_info->stream_src);
+
+		bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+		if (rdi < VFE_SRC_MAX)
+			stream_info->bandwidth =
+				(axi_data->src_info[rdi].pixel_clock / 8) * bpp;
+		else
+			pr_err("%s: Invalid rdi interface\n", __func__);
+	}
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+void msm_isp_start_avtimer(void)
+{
+	avcs_core_open();
+	avcs_core_disable_power_collapse(1);
+}
+
+static inline void msm_isp_get_avtimer_ts(
+		struct msm_isp_timestamp *time_stamp)
+{
+	int rc = 0;
+	uint32_t avtimer_usec = 0;
+	uint64_t avtimer_tick = 0;
+
+	rc = avcs_core_query_timer(&avtimer_tick);
+	if (rc < 0) {
+		pr_err("%s: Error: Invalid AVTimer Tick, rc=%d\n",
+			   __func__, rc);
+		/* In case of error return zero AVTimer Tick Value */
+		time_stamp->vt_time.tv_sec = 0;
+		time_stamp->vt_time.tv_usec = 0;
+	} else {
+		avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
+		time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
+		time_stamp->vt_time.tv_usec = avtimer_usec;
+		pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
+			(uint32_t)(avtimer_tick), avtimer_usec);
+	}
+}
+#else
+void msm_isp_start_avtimer(void)
+{
+	pr_err("AV Timer is not supported\n");
+}
+
+inline void msm_isp_get_avtimer_ts(
+		struct msm_isp_timestamp *time_stamp)
+{
+	pr_err_ratelimited("%s: Error: AVTimer driver not available\n",
+		__func__);
+	time_stamp->vt_time.tv_sec = 0;
+	time_stamp->vt_time.tv_usec = 0;
+}
+#endif
+
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	uint32_t io_format = 0;
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd = arg;
+	struct msm_vfe_axi_stream *stream_info;
+
+	rc = msm_isp_axi_create_stream(
+		&vfe_dev->axi_data, stream_cfg_cmd);
+	if (rc) {
+		pr_err("%s: create stream failed\n", __func__);
+		return rc;
+	}
+
+	rc = msm_isp_validate_axi_request(
+		&vfe_dev->axi_data, stream_cfg_cmd);
+	if (rc) {
+		pr_err("%s: Request validation failed\n", __func__);
+		if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
+			MAX_NUM_STREAM)
+			msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+			      HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+		return rc;
+	}
+	stream_info = &vfe_dev->axi_data.
+		stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+	if (!stream_info) {
+		pr_err("%s: can not find stream handle %x\n", __func__,
+			stream_cfg_cmd->axi_stream_handle);
+		return -EINVAL;
+	}
+
+	stream_info->memory_input = stream_cfg_cmd->memory_input;
+
+	msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
+
+	if (stream_info->stream_src < RDI_INTF_0) {
+		io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+		if (stream_info->stream_src == CAMIF_RAW ||
+			stream_info->stream_src == IDEAL_RAW) {
+			if (stream_info->stream_src == CAMIF_RAW &&
+				io_format != stream_info->output_format)
+				pr_debug("%s: Overriding input format\n",
+					__func__);
+
+			io_format = stream_info->output_format;
+		}
+		rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+			vfe_dev, stream_info->stream_src, io_format);
+		if (rc) {
+			pr_err("%s: cfg io format failed\n", __func__);
+			msm_isp_axi_free_wm(&vfe_dev->axi_data,
+				stream_info);
+			msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+				HANDLE_TO_IDX(
+				stream_cfg_cmd->axi_stream_handle));
+			return rc;
+		}
+	}
+
+	msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
+	if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
+		vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
+		msm_isp_start_avtimer();
+	}
+	if (stream_info->num_planes > 1) {
+		msm_isp_axi_reserve_comp_mask(
+			&vfe_dev->axi_data, stream_info);
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+		cfg_comp_mask(vfe_dev, stream_info);
+	} else {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_wm_irq_mask(vfe_dev, stream_info);
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_wm_reg(vfe_dev, stream_info, i);
+
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_wm_xbar_reg(vfe_dev, stream_info, i);
+	}
+	return rc;
+}
+
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
+
+	if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
+		MAX_NUM_STREAM) {
+		pr_err("%s: Invalid stream handle\n", __func__);
+		return -EINVAL;
+	}
+	stream_info = &axi_data->stream_info[
+		HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
+	if (stream_info->state == AVAILABLE) {
+		pr_err("%s: Stream already released\n", __func__);
+		return -EINVAL;
+	} else if (stream_info->state != INACTIVE) {
+		stream_cfg.cmd = STOP_STREAM;
+		stream_cfg.num_streams = 1;
+		stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+		msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++) {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			clear_wm_reg(vfe_dev, stream_info, i);
+
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+		clear_wm_xbar_reg(vfe_dev, stream_info, i);
+	}
+
+	if (stream_info->num_planes > 1) {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			clear_comp_mask(vfe_dev, stream_info);
+		msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+	} else {
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+		clear_wm_irq_mask(vfe_dev, stream_info);
+	}
+
+	vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
+	msm_isp_axi_free_wm(axi_data, stream_info);
+
+	msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+		HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+	return rc;
+}
+
+static void msm_isp_axi_stream_enable_cfg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_info->state == INACTIVE)
+		return;
+	for (i = 0; i < stream_info->num_planes; i++) {
+		if (stream_info->state == START_PENDING ||
+			stream_info->state == RESUME_PENDING) {
+			vfe_dev->hw_info->vfe_ops.axi_ops.
+				enable_wm(vfe_dev, stream_info->wm[i], 1);
+		} else {
+			vfe_dev->hw_info->vfe_ops.axi_ops.
+				enable_wm(vfe_dev, stream_info->wm[i], 0);
+			/* Issue a reg update for the raw snapshot case
+			 * since we don't get a reg update ack
+			 */
+			if (stream_info->stream_src == CAMIF_RAW ||
+				stream_info->stream_src == IDEAL_RAW) {
+				vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, (1 << VFE_PIX_0));
+			}
+		}
+	}
+
+	if (stream_info->state == START_PENDING)
+		axi_data->num_active_stream++;
+	else if (stream_info->state == STOP_PENDING)
+		axi_data->num_active_stream--;
+}
+
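+/*
+ * Apply pending stream state transitions on reg update:
+ * START_PENDING/STOP_PENDING streams on the signalled input are
+ * (dis)armed and move to STARTING/STOPPING, STARTING/STOPPING streams
+ * settle to ACTIVE/INACTIVE, and the waiter is completed once no
+ * stream_update remains outstanding.
+ */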
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev, uint8_t input_src)
+{
+	int i;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		if (axi_data->stream_info[i].state == START_PENDING ||
+				axi_data->stream_info[i].state ==
+					STOP_PENDING) {
+			if ((1 <<
+				SRC_TO_INTF(axi_data->stream_info[i].
+				stream_src)) &
+				input_src) {
+				msm_isp_axi_stream_enable_cfg(
+				   vfe_dev, &axi_data->stream_info[i]);
+				axi_data->stream_info[i].state =
+					axi_data->stream_info[i].state ==
+					START_PENDING ? STARTING : STOPPING;
+			}
+		} else if (axi_data->stream_info[i].state == STARTING ||
+			axi_data->stream_info[i].state == STOPPING) {
+			if ((1 <<
+				SRC_TO_INTF(axi_data->stream_info[i].
+				stream_src)) &
+				input_src) {
+				axi_data->stream_info[i].state =
+				axi_data->stream_info[i].state == STARTING ?
+					ACTIVE : INACTIVE;
+				vfe_dev->axi_data.stream_update--;
+			}
+		}
+	}
+
+	if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
+		(vfe_dev->axi_data.pipeline_update ==
+		DISABLE_CAMIF_IMMEDIATELY)) {
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			enable_module(vfe_dev, 0xFF, 0);
+		vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+	}
+
+	if (vfe_dev->axi_data.stream_update == 0)
+		complete(&vfe_dev->stream_config_complete);
+}
+
+static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info)
+{
+	int i, j;
+	uint32_t flag;
+	struct msm_isp_buffer *buf;
+
+	for (i = 0; i < 2; i++) {
+		buf = stream_info->buf[i];
+		if (!buf)
+			continue;
+		flag = i ? VFE_PONG_FLAG : VFE_PING_FLAG;
+		for (j = 0; j < stream_info->num_planes; j++) {
+			vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+				vfe_dev, stream_info->wm[j], flag,
+				buf->mapped_info[j].paddr +
+				stream_info->plane_cfg[j].plane_addr_offset);
+		}
+	}
+}
+
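+/*
+ * For continuous streams in PAUSING state the WM registers are
+ * reprogrammed while the AXI path is stopped, then the stream is resumed
+ * (PAUSED -> RESUME_PENDING -> RESUMING); a RESUMING stream is promoted
+ * back to ACTIVE with its new runtime output format.
+ */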
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev)
+{
+	int i, j;
+	uint32_t update_state;
+	unsigned long flags;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream *stream_info;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->stream_type == BURST_STREAM ||
+			stream_info->state == AVAILABLE)
+			continue;
+		spin_lock_irqsave(&stream_info->lock, flags);
+		if (stream_info->state == PAUSING) {
+			/*AXI Stopped, apply update*/
+			stream_info->state = PAUSED;
+			msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
+			for (j = 0; j < stream_info->num_planes; j++)
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_wm_reg(vfe_dev, stream_info, j);
+			/*Resume AXI*/
+			stream_info->state = RESUME_PENDING;
+			msm_isp_axi_stream_enable_cfg(
+				vfe_dev, &axi_data->stream_info[i]);
+			stream_info->state = RESUMING;
+		} else if (stream_info->state == RESUMING) {
+			stream_info->runtime_output_format =
+				stream_info->output_format;
+			stream_info->state = ACTIVE;
+		}
+		spin_unlock_irqrestore(&stream_info->lock, flags);
+	}
+
+	update_state = atomic_dec_return(&axi_data->axi_cfg_update);
+}
+
+static void msm_isp_cfg_pong_address(struct vfe_device *vfe_dev,
+		struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+	struct msm_isp_buffer *buf = stream_info->buf[0];
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+			vfe_dev, stream_info->wm[i],
+			VFE_PONG_FLAG, buf->mapped_info[i].paddr +
+			stream_info->plane_cfg[i].plane_addr_offset);
+	stream_info->buf[1] = buf;
+}
+
+static void msm_isp_get_done_buf(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+	struct msm_isp_buffer **done_buf)
+{
+	uint32_t pingpong_bit = 0, i;
+
+	pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+	for (i = 0; i < stream_info->num_planes; i++) {
+		if (pingpong_bit !=
+			(~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+			pr_debug("%s: Write master ping pong mismatch. Status: 0x%x\n",
+				__func__, pingpong_status);
+		}
+	}
+
+	*done_buf = stream_info->buf[pingpong_bit];
+
+	if (stream_info->controllable_output) {
+		stream_info->buf[pingpong_bit] = NULL;
+		stream_info->request_frm_num--;
+	}
+}
+
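+/*
+ * Fetch the next buffer from the buffer manager and program its plane
+ * addresses into the ping or pong registers selected by pingpong_status;
+ * a repeated frame id on the same stream (e.g. a delayed reg update ack)
+ * is counted as a framedrop and rejected.
+ */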
+static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+	uint32_t pingpong_bit)
+{
+	int i, rc = -1;
+	struct msm_isp_buffer *buf = NULL;
+	uint32_t bufq_handle = 0, frame_id = 0;
+	uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+
+	if (stream_idx >= MAX_NUM_STREAM) {
+		pr_err("%s: Invalid stream_idx", __func__);
+		return rc;
+	}
+
+	if (stream_info->controllable_output && !stream_info->request_frm_num)
+		return 0;
+
+	frame_id = vfe_dev->axi_data.
+		src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+	if (frame_id && stream_info->frame_id &&
+		stream_info->frame_id == frame_id) {
+		/* This could happen if reg update ack is delayed */
+		pr_err("%s: Duplicate frame streamId:%d stream_fid:%d frame_id:%d\n",
+			__func__, stream_info->stream_id, stream_info->frame_id,
+			frame_id);
+		vfe_dev->error_info.stream_framedrop_count[stream_idx]++;
+		return rc;
+	}
+
+	bufq_handle = stream_info->bufq_handle;
+	if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+		rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+				vfe_dev->pdev->id, bufq_handle,
+				MSM_ISP_INVALID_BUF_INDEX, &buf);
+	else {
+		pr_err("%s: Invalid stream index\n", __func__);
+		rc = -1;
+	}
+
+	if (rc < 0) {
+		vfe_dev->error_info.stream_framedrop_count[stream_idx]++;
+		return rc;
+	}
+
+	if (buf->num_planes != stream_info->num_planes) {
+		pr_err("%s: Invalid buffer\n", __func__);
+		rc = -EINVAL;
+		goto buf_error;
+	}
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+			vfe_dev, stream_info->wm[i],
+			pingpong_status, buf->mapped_info[i].paddr +
+			stream_info->plane_cfg[i].plane_addr_offset);
+
+	stream_info->buf[pingpong_bit] = buf;
+
+	return 0;
+buf_error:
+	vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+		buf->bufq_handle, buf->buf_idx);
+	return rc;
+}
+
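+/*
+ * Completed buffer handling: either divert the buffer to userspace
+ * (ISP_EVENT_BUF_DIVERT) when buf_divert is set and the buffer is not a
+ * scratch buffer, or send ISP_EVENT_BUF_DONE and return it to the buffer
+ * manager with the AV timer or monotonic timestamp.
+ */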
+static void msm_isp_process_done_buf(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
+	struct msm_isp_timestamp *ts)
+{
+	int rc;
+	struct msm_isp32_event_data buf_event;
+	struct timeval *time_stamp;
+	uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+	uint32_t frame_id;
+	uint32_t buf_src;
+
+	memset(&buf_event, 0, sizeof(buf_event));
+
+	if (stream_idx >= MAX_NUM_STREAM) {
+		pr_err("%s: Invalid stream_idx", __func__);
+		return;
+	}
+
+	if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+		frame_id = vfe_dev->axi_data.
+			src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+	else {
+		pr_err("%s: Invalid stream index, put buf back to vb2 queue\n",
+			__func__);
+		vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+			buf->bufq_handle, buf->buf_idx);
+		return;
+	}
+
+	if (buf && ts) {
+		if (vfe_dev->vt_enable) {
+			msm_isp_get_avtimer_ts(ts);
+			time_stamp = &ts->vt_time;
+		} else
+			time_stamp = &ts->buf_time;
+
+		rc = vfe_dev->buf_mgr->ops->get_buf_src(vfe_dev->buf_mgr,
+						buf->bufq_handle, &buf_src);
+		if (stream_info->buf_divert && rc == 0 &&
+				buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
+			rc = vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx,
+				time_stamp, frame_id);
+			/* The buf_divert return value indicates whether the
+			 * buffer can be diverted. A positive return value
+			 * means other ISP hardware is still processing the
+			 * frame.
+			 */
+			if (rc == 0) {
+				buf_event.input_intf =
+					SRC_TO_INTF(stream_info->stream_src);
+				buf_event.frame_id = frame_id;
+				buf_event.timestamp = *time_stamp;
+				buf_event.u.buf_done.session_id =
+					stream_info->session_id;
+				buf_event.u.buf_done.stream_id =
+					stream_info->stream_id;
+				buf_event.u.buf_done.handle =
+					stream_info->bufq_handle;
+				buf_event.u.buf_done.buf_idx = buf->buf_idx;
+				buf_event.u.buf_done.output_format =
+					stream_info->runtime_output_format;
+				msm_isp_send_event(vfe_dev,
+					ISP_EVENT_BUF_DIVERT + stream_idx,
+					&buf_event);
+			}
+		} else {
+			buf_event.input_intf =
+				SRC_TO_INTF(stream_info->stream_src);
+			buf_event.frame_id = frame_id;
+			buf_event.timestamp = ts->buf_time;
+			buf_event.u.buf_done.session_id =
+				stream_info->session_id;
+			buf_event.u.buf_done.stream_id =
+				stream_info->stream_id;
+			buf_event.u.buf_done.output_format =
+				stream_info->runtime_output_format;
+			msm_isp_send_event(vfe_dev,
+				ISP_EVENT_BUF_DONE, &buf_event);
+			vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx,
+				time_stamp, frame_id,
+				stream_info->runtime_output_format);
+		}
+	}
+}
+
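+/*
+ * Decide whether this start/stop request toggles CAMIF: the first PIX
+ * stream to start enables CAMIF and a stop request covering all remaining
+ * PIX streams disables it (immediately for STOP_IMMEDIATELY); an
+ * EXTERNAL_READ input is left untouched.
+ */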
+static enum msm_isp_camif_update_state
+	msm_isp_get_camif_update_state(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
+
+	cur_pix_stream_cnt =
+		axi_data->src_info[VFE_PIX_0].pix_stream_count +
+		axi_data->src_info[VFE_PIX_0].raw_stream_count;
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		stream_info =
+			&axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (stream_info->stream_src  < RDI_INTF_0)
+			pix_stream_cnt++;
+	}
+
+	if ((pix_stream_cnt) &&
+	    (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
+
+		if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
+			stream_cfg_cmd->cmd == START_STREAM)
+			return ENABLE_CAMIF;
+		else if (cur_pix_stream_cnt &&
+			(cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+			stream_cfg_cmd->cmd == STOP_STREAM)
+			return DISABLE_CAMIF;
+		else if (cur_pix_stream_cnt &&
+			(cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+			stream_cfg_cmd->cmd == STOP_IMMEDIATELY)
+			return DISABLE_CAMIF_IMMEDIATELY;
+	}
+
+	return NO_UPDATE;
+}
+
+static void msm_isp_update_camif_output_count(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return;
+		}
+		stream_info =
+			&axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (stream_info->stream_src >= RDI_INTF_0)
+			continue;
+		if (stream_info->stream_src == PIX_ENCODER ||
+			stream_info->stream_src == PIX_VIEWFINDER ||
+			stream_info->stream_src == PIX_VIDEO ||
+			stream_info->stream_src == IDEAL_RAW) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					pix_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					pix_stream_count--;
+		} else if (stream_info->stream_src == CAMIF_RAW) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_PIX_0].
+					raw_stream_count--;
+		}
+	}
+}
+
+
+static void msm_isp_update_rdi_output_count(
+	  struct vfe_device *vfe_dev,
+	  struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
+			> MAX_NUM_STREAM)
+			return;
+		stream_info =
+			&axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (stream_info->stream_src < RDI_INTF_0)
+			continue;
+		if (stream_info->stream_src == RDI_INTF_0) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_RAW_0].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_RAW_0].
+					raw_stream_count--;
+		} else if (stream_info->stream_src == RDI_INTF_1) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_RAW_1].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_RAW_1].
+					raw_stream_count--;
+		} else if (stream_info->stream_src == RDI_INTF_2) {
+			if (stream_cfg_cmd->cmd == START_STREAM)
+				vfe_dev->axi_data.src_info[VFE_RAW_2].
+					raw_stream_count++;
+			else
+				vfe_dev->axi_data.src_info[VFE_RAW_2].
+					raw_stream_count--;
+		}
+
+	}
+}
+
+static uint8_t msm_isp_get_curr_stream_cnt(
+	  struct vfe_device *vfe_dev)
+{
+	uint8_t curr_stream_cnt = 0;
+
+	curr_stream_cnt = vfe_dev->axi_data.src_info[VFE_RAW_0].
+					raw_stream_count +
+					vfe_dev->axi_data.src_info[VFE_RAW_1].
+					raw_stream_count +
+					vfe_dev->axi_data.src_info[VFE_RAW_2].
+					raw_stream_count +
+					vfe_dev->axi_data.src_info[VFE_PIX_0].
+					pix_stream_count +
+					vfe_dev->axi_data.src_info[VFE_PIX_0].
+					raw_stream_count;
+	return curr_stream_cnt;
+}
+
+/*Factor in Q2 format*/
+#define ISP_DEFAULT_FORMAT_FACTOR 6
+#define ISP_BUS_UTILIZATION_FACTOR 1536 /* 1.5 in Q10 format */
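+/*
+ * Sum the bandwidth of all ACTIVE/START_PENDING streams and vote it to
+ * the bus, padding the instantaneous bandwidth by the bus utilization
+ * factor in Q10 format (e.g. 1536/1024 = 1.5x); the MSM_ISP_MIN_AB/
+ * MSM_ISP_MIN_IB floor is added unless exactly one stream is active.
+ */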
+static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
+{
+	int i, rc = 0;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
+	uint32_t num_pix_streams = 0;
+	uint32_t num_rdi_streams = 0;
+	uint32_t total_streams   = 0;
+	uint64_t total_bandwidth = 0;
+	uint32_t bus_util_factor = ISP_BUS_UTILIZATION_FACTOR;
+
+	for (i = 0; i < MAX_NUM_STREAM; i++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state == ACTIVE ||
+			stream_info->state == START_PENDING) {
+			if (stream_info->stream_src < RDI_INTF_0) {
+				total_pix_bandwidth += stream_info->bandwidth;
+				num_pix_streams++;
+			} else {
+				total_rdi_bandwidth += stream_info->bandwidth;
+				num_rdi_streams++;
+			}
+		}
+	}
+	total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
+	total_streams = num_pix_streams + num_rdi_streams;
+	if (vfe_dev->bus_util_factor)
+		bus_util_factor = vfe_dev->bus_util_factor;
+	ISP_DBG("%s: bus_util_factor = %u\n", __func__, bus_util_factor);
+
+	if (total_streams == 1)
+		rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+		total_bandwidth,
+		(total_bandwidth * bus_util_factor / ISP_Q10));
+	else
+		rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+		(total_bandwidth + MSM_ISP_MIN_AB),  (total_bandwidth *
+		bus_util_factor / ISP_Q10 + MSM_ISP_MIN_IB));
+	if (rc < 0)
+		pr_err("%s: update failed\n", __func__);
+
+	return rc;
+}
+
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+	enum msm_isp_camif_update_state camif_update)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+	init_completion(&vfe_dev->stream_config_complete);
+	vfe_dev->axi_data.pipeline_update = camif_update;
+	spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+	rc = wait_for_completion_timeout(
+		&vfe_dev->stream_config_complete,
+		msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+	if (rc == 0) {
+		pr_err("%s: wait timeout\n", __func__);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
+static int msm_isp_init_stream_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int rc = 0;
+	/*Set address for both PING & PONG register */
+	rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PING_FLAG, 0);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for ping\n",
+			   __func__);
+		return rc;
+	}
+
+	/* For burst stream of one capture, only one buffer
+	 * is allocated. Duplicate ping buffer address to pong
+	 * buffer to ensure hardware write to a valid address
+	 */
+	if (stream_info->stream_type == BURST_STREAM &&
+		stream_info->runtime_num_burst_capture <= 1) {
+		msm_isp_cfg_pong_address(vfe_dev, stream_info);
+	} else {
+		rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+			stream_info, VFE_PONG_FLAG, 1);
+		if (rc < 0) {
+			pr_err("%s: No free buffer for pong\n",
+				   __func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static void msm_isp_deinit_stream_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct msm_isp_buffer *buf;
+
+		buf = stream_info->buf[i];
+		if (buf) {
+			vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx);
+		}
+	}
+}
+
+static void msm_isp_get_stream_wm_mask(
+	struct msm_vfe_axi_stream *stream_info,
+	uint32_t *wm_reload_mask)
+{
+	int i;
+
+	for (i = 0; i < stream_info->num_planes; i++)
+		*wm_reload_mask |= (1 << stream_info->wm[i]);
+}
+
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_halt_cmd *halt_cmd)
+{
+	int rc = 0;
+
+	if (halt_cmd->overflow_detected) {
+		/*Store current IRQ mask*/
+		if (vfe_dev->error_info.overflow_recover_irq_mask0 == 0) {
+			vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+			&vfe_dev->error_info.overflow_recover_irq_mask0,
+			&vfe_dev->error_info.overflow_recover_irq_mask1);
+		}
+		atomic_set(&vfe_dev->error_info.overflow_state,
+			OVERFLOW_DETECTED);
+	}
+
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+
+	if (halt_cmd->stop_camif) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+	}
+
+	return rc;
+}
+
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_reset_cmd *reset_cmd)
+{
+	int rc = 0, i, j;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_isp_bufq *bufq = NULL;
+	struct msm_isp_timestamp timestamp;
+
+	if (!reset_cmd) {
+		pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
+		rc = -1;
+		return rc;
+	}
+
+	msm_isp_get_timestamp(&timestamp, vfe_dev);
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+		0, reset_cmd->blocking);
+
+	for (i = 0, j = 0; j < axi_data->num_active_stream &&
+		i < MAX_NUM_STREAM; i++, j++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+			rc = -1;
+			pr_err("%s invalid  stream src = %d\n", __func__,
+				stream_info->stream_src);
+			break;
+		}
+		if (stream_info->state != ACTIVE) {
+			j--;
+			continue;
+		}
+
+		bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+			stream_info->bufq_handle);
+		if (!bufq) {
+			pr_err("%s: bufq null %pK by handle %x\n", __func__,
+				bufq, stream_info->bufq_handle);
+			continue;
+		}
+
+		if (bufq->buf_type != ISP_SHARE_BUF) {
+			msm_isp_deinit_stream_ping_pong_reg(vfe_dev,
+				stream_info);
+		} else {
+			vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+				stream_info->bufq_handle,
+				MSM_ISP_BUFFER_FLUSH_ALL,
+				&timestamp.buf_time,
+				reset_cmd->frame_id);
+		}
+		axi_data->src_info[SRC_TO_INTF(stream_info->stream_src)].
+			frame_id = reset_cmd->frame_id;
+		msm_isp_reset_burst_count_and_frame_drop(vfe_dev, stream_info);
+	}
+
+	if (rc < 0)
+		pr_err("%s Error! reset hw Timed out\n", __func__);
+
+	return rc;
+}
+
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_restart_cmd *restart_cmd)
+{
+	int rc = 0, i, j;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t wm_reload_mask = 0x0;
+
+	for (i = 0, j = 0; j < axi_data->num_active_stream &&
+		i < MAX_NUM_STREAM; i++, j++) {
+		stream_info = &axi_data->stream_info[i];
+		if (stream_info->state != ACTIVE) {
+			j--;
+			continue;
+		}
+		msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+		msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+	}
+
+	vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+		restart_cmd->enable_camif);
+	if (rc < 0)
+		pr_err("%s Error restarting HW\n", __func__);
+
+	return rc;
+}
+
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+	uint8_t cgc_override)
+{
+	int i = 0, j = 0;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+				MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		for (j = 0; j < stream_info->num_planes; j++) {
+			if (vfe_dev->hw_info->vfe_ops.axi_ops.
+				update_cgc_override)
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					update_cgc_override(vfe_dev,
+					stream_info->wm[j], cgc_override);
+		}
+	}
+	return 0;
+}
+
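+/*
+ * Start sequence: compute bandwidth and framedrop, prime the ping/pong
+ * buffers, then either arm the stream immediately (when its source is not
+ * yet active) or mark it START_PENDING and wait for the reg-update driven
+ * state machine to activate it; CAMIF is enabled afterwards if required.
+ */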
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+			enum msm_isp_camif_update_state camif_update)
+{
+	int i, rc = 0;
+	uint8_t src_state, wait_for_complete = 0;
+	uint32_t wm_reload_mask = 0x0;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint8_t init_frm_drop = 0;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+			src_state = axi_data->src_info[
+				SRC_TO_INTF(stream_info->stream_src)].active;
+		else {
+			pr_err("%s: invalid src info index\n", __func__);
+			return -EINVAL;
+		}
+
+		msm_isp_calculate_bandwidth(axi_data, stream_info);
+		msm_isp_reset_framedrop(vfe_dev, stream_info);
+		init_frm_drop = stream_info->init_frame_drop;
+		msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+		rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+		if (rc < 0) {
+			pr_err("%s: No buffer for stream %d\n",
+					__func__,
+				HANDLE_TO_IDX(
+				stream_cfg_cmd->stream_handle[i]));
+			return rc;
+		}
+
+		stream_info->state = START_PENDING;
+		if (src_state) {
+			wait_for_complete = 1;
+		} else {
+			if (vfe_dev->dump_reg)
+				msm_camera_io_dump_2(vfe_dev->vfe_base, 0x900);
+
+			/*Configure AXI start bits to start immediately*/
+			msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+			stream_info->state = ACTIVE;
+		}
+		if (SRC_TO_INTF(stream_info->stream_src) != VFE_PIX_0 &&
+			stream_info->stream_src < VFE_AXI_SRC_MAX) {
+			vfe_dev->axi_data.src_info[SRC_TO_INTF(
+				stream_info->stream_src)].frame_id =
+						init_frm_drop;
+		}
+	}
+	msm_isp_update_stream_bandwidth(vfe_dev);
+	vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+	vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, 0xF);
+	msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+	msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
+	if (camif_update == ENABLE_CAMIF) {
+		atomic_set(&vfe_dev->error_info.overflow_state,
+				NO_OVERFLOW);
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, camif_update);
+	}
+
+	if (wait_for_complete) {
+		vfe_dev->axi_data.stream_update = stream_cfg_cmd->num_streams;
+		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+	}
+
+	return rc;
+}
+
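+/*
+ * Stop sequence: streams that still need a reg update are marked
+ * STOP_PENDING and waited on, the rest are disabled immediately; CAMIF is
+ * disabled when requested and, once no stream remains, the AXI bus is
+ * halted and the hardware is reset and reinitialised.
+ */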
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+			enum msm_isp_camif_update_state camif_update)
+{
+	int i, rc = 0;
+	uint8_t wait_for_complete_for_this_stream = 0, cur_stream_cnt = 0;
+	uint8_t wait_for_complete = 0;
+	struct msm_vfe_axi_stream *stream_info = NULL;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	int    ext_read =
+		axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ;
+
+	if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+		stream_cfg_cmd->num_streams == 0)
+		return -EINVAL;
+
+	msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+	msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
+	cur_stream_cnt = msm_isp_get_curr_stream_cnt(vfe_dev);
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		wait_for_complete_for_this_stream = 0;
+		stream_info->state = STOP_PENDING;
+		if (stream_info->stream_src == CAMIF_RAW ||
+			stream_info->stream_src == IDEAL_RAW) {
+			/* We don't get a reg update IRQ for raw snapshot,
+			 * so frame skip can't be configured
+			 */
+			if ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+					(!ext_read))
+				wait_for_complete_for_this_stream = 1;
+		} else if (stream_info->stream_type == BURST_STREAM &&
+				stream_info->runtime_num_burst_capture == 0) {
+			/* Configure AXI writemasters to stop immediately
+			 * since for burst case, write masters already skip
+			 * all frames.
+			 */
+			if (stream_info->stream_src == RDI_INTF_0 ||
+				stream_info->stream_src == RDI_INTF_1 ||
+				stream_info->stream_src == RDI_INTF_2)
+				wait_for_complete_for_this_stream = 1;
+		} else {
+			if  ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+				(!ext_read) &&
+				!(stream_info->stream_src == RDI_INTF_0 ||
+				stream_info->stream_src == RDI_INTF_1 ||
+				stream_info->stream_src == RDI_INTF_2))
+				wait_for_complete_for_this_stream = 1;
+		}
+		if (!wait_for_complete_for_this_stream) {
+			msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+			stream_info->state = INACTIVE;
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, 0xF);
+		}
+		wait_for_complete |= wait_for_complete_for_this_stream;
+	}
+	if (wait_for_complete) {
+		vfe_dev->axi_data.stream_update = stream_cfg_cmd->num_streams;
+		vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, 0xF);
+		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+		if (rc < 0) {
+			pr_err("%s: wait for config done failed\n", __func__);
+			for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+				stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(
+					stream_cfg_cmd->stream_handle[i])];
+				stream_info->state = STOP_PENDING;
+				vfe_dev->hw_info->vfe_ops.core_ops.
+					reg_update(vfe_dev, 0xF);
+				msm_isp_axi_stream_enable_cfg(
+					vfe_dev, stream_info);
+				stream_info->state = INACTIVE;
+			}
+		}
+	}
+	if (camif_update == DISABLE_CAMIF) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF);
+	} else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
+					(ext_read)) {
+		/*during stop immediately, stop output then stop input*/
+		if (!ext_read)
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				update_camif_state(vfe_dev,
+						DISABLE_CAMIF_IMMEDIATELY);
+	}
+	if (cur_stream_cnt == 0) {
+		vfe_dev->ignore_error = 1;
+		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+		vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+		vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+		vfe_dev->ignore_error = 0;
+	}
+	msm_isp_update_stream_bandwidth(vfe_dev);
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		stream_info = &axi_data->stream_info[
+			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+		msm_isp_deinit_stream_ping_pong_reg(vfe_dev, stream_info);
+	}
+
+	return rc;
+}
+
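+/*
+ * Entry point for starting or stopping AXI streams: validates the
+ * requested state transitions, configures the AXI UB when no stream is
+ * active yet, derives the required CAMIF update and dispatches to the
+ * start or stop helper, toggling the CGC override around the change.
+ */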
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	enum msm_isp_camif_update_state camif_update;
+
+	rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
+	if (rc < 0) {
+		pr_err("%s: Invalid stream state\n", __func__);
+		return rc;
+	}
+
+	if (axi_data->num_active_stream == 0) {
+		/* Configure UB */
+		vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
+	}
+	camif_update = msm_isp_get_camif_update_state(vfe_dev, stream_cfg_cmd);
+
+	if (stream_cfg_cmd->cmd == START_STREAM) {
+		msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
+
+		rc = msm_isp_start_axi_stream(
+		   vfe_dev, stream_cfg_cmd, camif_update);
+	} else {
+		rc = msm_isp_stop_axi_stream(
+		   vfe_dev, stream_cfg_cmd, camif_update);
+
+		msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
+	}
+
+	if (rc < 0)
+		pr_err("%s: start/stop stream failed\n", __func__);
+	return rc;
+}
+
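+/*
+ * Queue additional frames on a controllable output stream: bump the
+ * pending request count, reprogram the framedrop pattern for the new
+ * burst count and prime any empty ping/pong buffer. When the stream had
+ * no outstanding requests, both buffers are set up from scratch and the
+ * write masters are reloaded.
+ */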
+static int msm_isp_request_frame(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, uint32_t request_frm_num)
+{
+	struct msm_vfe32_axi_stream_request_cmd stream_cfg_cmd;
+	int rc = 0;
+	uint32_t pingpong_status, pingpong_bit, wm_reload_mask = 0x0;
+
+	if (!stream_info->controllable_output)
+		return 0;
+
+	if (!request_frm_num) {
+		pr_err("%s: Invalid frame request.\n", __func__);
+		return -EINVAL;
+	}
+
+	stream_info->request_frm_num += request_frm_num;
+
+	stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+	stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
+	stream_cfg_cmd.init_frame_drop = 0;
+	stream_cfg_cmd.burst_count = stream_info->request_frm_num;
+	msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+	msm_isp_reset_framedrop(vfe_dev, stream_info);
+
+	if (stream_info->request_frm_num != request_frm_num) {
+		pingpong_status =
+			vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
+				vfe_dev);
+		pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+
+		if (!stream_info->buf[pingpong_bit]) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				pingpong_status, pingpong_bit);
+			if (rc) {
+				pr_err("%s:%d fail to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+
+		if (!stream_info->buf[!pingpong_bit] && request_frm_num > 1) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				~pingpong_status, !pingpong_bit);
+			if (rc) {
+				pr_err("%s:%d fail to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+	} else {
+		if (!stream_info->buf[0]) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				VFE_PING_FLAG, 0);
+			if (rc) {
+				pr_err("%s:%d fail to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+
+		if (!stream_info->buf[1] && request_frm_num > 1) {
+			rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+				VFE_PONG_FLAG, 1);
+			if (rc) {
+				pr_err("%s:%d fail to set ping pong address\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		}
+
+		msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+		vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+			wm_reload_mask);
+	}
+
+	return rc;
+}
+
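+/*
+ * Apply a userspace stream update: after bounds- and state-checking every
+ * referenced stream, handle buffer-divert enable/disable (flushing any
+ * diverted buffers), framedrop pattern changes, AXI plane/output-format
+ * reconfiguration (pausing active streams first) and user frame requests.
+ */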
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i, j;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+	struct msm_vfe_axi_stream_cfg_update_info *update_info;
+	uint32_t frame_id;
+	struct msm_isp_timestamp timestamp;
+
+	if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
+		atomic_read(&axi_data->axi_cfg_update)) {
+		pr_err("%s: AXI stream config updating\n", __func__);
+		return -EBUSY;
+	}
+
+	/* num_streams is uint32_t, update_info[] bounded by MAX_NUM_STREAM */
+	if (update_cmd->num_streams > MAX_NUM_STREAM)
+		return -EINVAL;
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		/* check array reference bounds */
+		if (HANDLE_TO_IDX(update_info->stream_handle) >=
+			MAX_NUM_STREAM) {
+			return -EINVAL;
+		}
+		stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(update_info->stream_handle)];
+		if (stream_info->state != ACTIVE &&
+			stream_info->state != INACTIVE) {
+			pr_err("%s: Invalid stream state\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		stream_info = &axi_data->stream_info[
+				HANDLE_TO_IDX(update_info->stream_handle)];
+
+		switch (update_cmd->update_type) {
+		case ENABLE_STREAM_BUF_DIVERT:
+			stream_info->buf_divert = 1;
+			break;
+		case DISABLE_STREAM_BUF_DIVERT:
+			msm_isp_get_timestamp(&timestamp, vfe_dev);
+			stream_info->buf_divert = 0;
+			frame_id = vfe_dev->axi_data.
+					src_info[SRC_TO_INTF(
+					stream_info->stream_src)].
+					frame_id;
+			vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+					stream_info->bufq_handle,
+					MSM_ISP_BUFFER_FLUSH_DIVERTED,
+					&timestamp.buf_time, frame_id);
+			break;
+		case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+			uint32_t framedrop_period =
+				msm_isp_get_framedrop_period(
+				   update_info->skip_pattern);
+			if (update_info->skip_pattern == SKIP_ALL)
+				stream_info->framedrop_pattern = 0x0;
+			else
+				stream_info->framedrop_pattern = 0x1;
+			stream_info->framedrop_period = framedrop_period - 1;
+			if (stream_info->stream_type == BURST_STREAM) {
+				stream_info->runtime_framedrop_update_burst = 1;
+			} else {
+				stream_info->runtime_init_frame_drop = 0;
+				vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_framedrop(vfe_dev, stream_info);
+			}
+			break;
+		}
+		case UPDATE_STREAM_AXI_CONFIG: {
+			for (j = 0; j < stream_info->num_planes; j++) {
+				stream_info->plane_cfg[j] =
+					update_info->plane_cfg[j];
+			}
+			stream_info->output_format = update_info->output_format;
+			if (stream_info->state == ACTIVE) {
+				stream_info->state = PAUSE_PENDING;
+				msm_isp_axi_stream_enable_cfg(
+					vfe_dev, stream_info);
+				stream_info->state = PAUSING;
+				atomic_set(&axi_data->axi_cfg_update,
+					UPDATE_REQUESTED);
+			} else {
+				for (j = 0; j < stream_info->num_planes; j++)
+					vfe_dev->hw_info->vfe_ops.axi_ops.
+					cfg_wm_reg(vfe_dev, stream_info, j);
+				stream_info->runtime_output_format =
+					stream_info->output_format;
+			}
+			break;
+		}
+		case UPDATE_STREAM_REQUEST_FRAMES: {
+			rc = msm_isp_request_frame(vfe_dev, stream_info,
+				update_info->frame_id);
+			if (rc)
+				pr_err("%s failed to request frame!\n",
+					__func__);
+			break;
+		}
+		default:
+			pr_err("%s: Invalid update type\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
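+/*
+ * Write-master done IRQ handler: for every composite group and stand-alone
+ * write master that signalled, pick up the completed buffer, program a new
+ * ping/pong address for continuous or still-running burst streams, and
+ * hand the done buffer off via msm_isp_process_done_buf.
+ */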
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts)
+{
+	int i, rc = 0;
+	struct msm_isp_buffer *done_buf = NULL;
+	uint32_t comp_mask = 0, wm_mask = 0;
+	uint32_t pingpong_status, stream_idx;
+	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_composite_info *comp_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t pingpong_bit = 0, frame_id = 0;
+
+	comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+		get_comp_mask(irq_status0, irq_status1);
+	wm_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+		get_wm_mask(irq_status0, irq_status1);
+	if (!(comp_mask || wm_mask))
+		return;
+
+	ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+	pingpong_status =
+		vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+	for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+		rc = 0;
+		comp_info = &axi_data->composite_info[i];
+		if (comp_mask & (1 << i)) {
+			stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
+			if ((!comp_info->stream_handle) ||
+				(stream_idx >= MAX_NUM_STREAM)) {
+				pr_err("%s: Invalid handle for composite irq\n",
+					__func__);
+			} else {
+				stream_idx =
+					HANDLE_TO_IDX(comp_info->stream_handle);
+				stream_info =
+					&axi_data->stream_info[stream_idx];
+
+				pingpong_bit = (~(pingpong_status >>
+					stream_info->wm[0]) & 0x1);
+
+				if (stream_info->stream_type == BURST_STREAM)
+					stream_info->
+						runtime_num_burst_capture--;
+
+				msm_isp_get_done_buf(vfe_dev, stream_info,
+					pingpong_status, &done_buf);
+				if (stream_info->stream_type ==
+					CONTINUOUS_STREAM ||
+					stream_info->
+					runtime_num_burst_capture > 1) {
+					rc = msm_isp_cfg_ping_pong_address(
+							vfe_dev, stream_info,
+							pingpong_status,
+							pingpong_bit);
+				}
+				frame_id = vfe_dev->axi_data.
+					src_info[SRC_TO_INTF(
+					stream_info->stream_src)].
+					frame_id;
+				stream_info->frame_id = frame_id;
+				ISP_DBG("%s: stream id:%d frame id:%d\n",
+					__func__, stream_info->stream_id,
+					stream_info->frame_id);
+				if (done_buf && !rc)
+					msm_isp_process_done_buf(vfe_dev,
+					stream_info, done_buf, ts);
+			}
+		}
+		wm_mask &= ~(comp_info->stream_composite_mask);
+	}
+
+	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+		if (wm_mask & (1 << i)) {
+			stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
+			if ((!axi_data->free_wm[i]) ||
+				(stream_idx >= MAX_NUM_STREAM)) {
+				pr_err("%s: Invalid handle for wm irq\n",
+					__func__);
+				continue;
+			}
+			stream_info = &axi_data->stream_info[stream_idx];
+
+			pingpong_bit = (~(pingpong_status >>
+				stream_info->wm[0]) & 0x1);
+
+			if (stream_info->stream_type == BURST_STREAM)
+				stream_info->runtime_num_burst_capture--;
+
+			msm_isp_get_done_buf(vfe_dev, stream_info,
+						pingpong_status, &done_buf);
+			if (stream_info->stream_type == CONTINUOUS_STREAM ||
+				stream_info->runtime_num_burst_capture > 1) {
+				rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+					stream_info, pingpong_status,
+					pingpong_bit);
+			}
+			stream_info->frame_id = frame_id;
+			ISP_DBG("%s: stream id:%d frame id:%d\n",
+				__func__, stream_info->stream_id,
+				stream_info->frame_id);
+			if (done_buf && !rc)
+				msm_isp_process_done_buf(vfe_dev,
+				stream_info, done_buf, ts);
+		}
+	}
+}
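+
+/*
+ * Return a buffer that userspace has finished with: repackage the ioctl
+ * payload into an event structure and hand it to the buffer manager's
+ * buf_done op.
+ */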
+int msm_isp_user_buf_done(struct vfe_device *vfe_dev,
+	struct msm_isp32_event_data *buf_cmd)
+{
+	int rc = 0;
+	struct msm_isp32_event_data buf_event;
+
+	memset(&buf_event, 0, sizeof(buf_event));
+	buf_event.input_intf = buf_cmd->input_intf;
+	buf_event.frame_id = buf_cmd->frame_id;
+	buf_event.timestamp = buf_cmd->timestamp;
+	buf_event.u.buf_done.session_id =
+	  buf_cmd->u.buf_done.session_id;
+	buf_event.u.buf_done.stream_id =
+	  buf_cmd->u.buf_done.stream_id;
+	buf_event.u.buf_done.output_format =
+	  buf_cmd->u.buf_done.output_format;
+	buf_event.u.buf_done.buf_idx =
+	  buf_cmd->u.buf_done.buf_idx;
+	buf_event.u.buf_done.handle =
+	   buf_cmd->u.buf_done.handle;
+
+	vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+			buf_event.u.buf_done.handle,
+			buf_event.u.buf_done.buf_idx,
+			&buf_event.timestamp, buf_event.frame_id,
+			buf_event.u.buf_done.output_format);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h
new file mode 100644
index 0000000..5d89a84
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_AXI_UTIL_H__
+#define __MSM_ISP_AXI_UTIL_H__
+
+#include "msm_isp_32.h"
+
+int msm_isp_axi_create_stream(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_destroy_stream(
+	struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
+
+int msm_isp_validate_axi_request(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_reserve_wm(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_axi_reserve_comp_mask(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_axi_check_stream_state(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
+
+void msm_isp_calculate_framedrop(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_start_avtimer(void);
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev);
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_halt_cmd *halt_cmd);
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_reset_cmd *reset_cmd);
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_restart_cmd *restart_cmd);
+int msm_isp_user_buf_done(struct vfe_device *vfe_dev,
+	struct msm_isp32_event_data *buf_cmd);
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+	uint8_t input_src);
+
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+	uint8_t input_src);
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+	enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts);
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream *stream_info);
+#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c
new file mode 100644
index 0000000..8273298
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c
@@ -0,0 +1,709 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include "msm_isp_util_32.h"
+#include "msm_isp_stats_util_32.h"
+
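+/*
+ * Fetch a free stats buffer from the buffer manager and program it into
+ * the ping/pong slot selected by pingpong_status; the buffer previously
+ * occupying that slot, if any, is returned through *done_buf.
+ */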
+static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
+	struct msm_isp_buffer **done_buf)
+{
+	int rc = -1;
+	struct msm_isp_buffer *buf;
+	uint32_t pingpong_bit = 0;
+	uint32_t bufq_handle = stream_info->bufq_handle;
+	uint32_t stats_pingpong_offset;
+	uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
+		stats_idx >= MSM_ISP_STATS_MAX) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stats_pingpong_offset =
+		vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+		stats_idx];
+
+	pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+	rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+			vfe_dev->pdev->id, bufq_handle,
+			MSM_ISP_INVALID_BUF_INDEX, &buf);
+	if (rc < 0) {
+		vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+		return rc;
+	}
+
+	if (buf->num_planes != 1) {
+		pr_err("%s: Invalid buffer\n", __func__);
+		rc = -EINVAL;
+		goto buf_error;
+	}
+
+	vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+		vfe_dev, stream_info,
+		pingpong_status, buf->mapped_info[0].paddr +
+		stream_info->buffer_offset);
+
+	if (stream_info->buf[pingpong_bit] && done_buf)
+		*done_buf = stream_info->buf[pingpong_bit];
+
+	stream_info->buf[pingpong_bit] = buf;
+	return 0;
+buf_error:
+	vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+		buf->bufq_handle, buf->buf_idx);
+	return rc;
+}
+
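+/*
+ * Stats IRQ handler: rebuild the per-composite irq mask, rotate the
+ * ping/pong buffer for every stats type that signalled, divert the
+ * completed buffer to userspace and send either an individual or a
+ * composite STATS_NOTIFY event.
+ */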
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts)
+{
+	int i, j, rc;
+	struct msm_isp32_event_data buf_event;
+	struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
+	struct msm_isp_buffer *done_buf;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+	uint32_t pingpong_status;
+	uint32_t comp_stats_type_mask = 0, atomic_stats_mask = 0;
+	uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
+	uint32_t num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+	stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+		get_comp_mask(irq_status0, irq_status1);
+	stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+		get_wm_mask(irq_status0, irq_status1);
+	if (!(stats_comp_mask || stats_irq_mask))
+		return;
+	ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+
+	/*
+	 * If any composite mask is set, clear its irq bits from
+	 * stats_irq_mask; they will be restored from the composite mask below
+	 */
+	if (stats_comp_mask) {
+		for (j = 0; j < num_stats_comp_mask; j++) {
+			stats_irq_mask &= ~atomic_read(
+				&vfe_dev->stats_data.stats_comp_mask[j]);
+		}
+	}
+
+	for (j = 0; j < num_stats_comp_mask; j++) {
+		atomic_stats_mask = atomic_read(
+			&vfe_dev->stats_data.stats_comp_mask[j]);
+		if (!stats_comp_mask) {
+			stats_irq_mask &= ~atomic_stats_mask;
+		} else {
+			/* restore irq bits from composite mask */
+			if (stats_comp_mask & (1 << j))
+				stats_irq_mask |= atomic_stats_mask;
+		}
+		/* if no irq bits are set for this composite mask, continue */
+		if (!stats_irq_mask)
+			continue;
+		memset(&buf_event, 0, sizeof(struct msm_isp32_event_data));
+		buf_event.timestamp = ts->event_time;
+		buf_event.frame_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		buf_event.input_intf = VFE_PIX_0;
+		pingpong_status = vfe_dev->hw_info->
+			vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
+
+		for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type;
+			i++) {
+			if (!(stats_irq_mask & (1 << i)))
+				continue;
+
+			stats_irq_mask &= ~(1 << i);
+			stream_info = &vfe_dev->stats_data.stream_info[i];
+			done_buf = NULL;
+			msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+				stream_info, pingpong_status, &done_buf);
+			if (done_buf) {
+				rc = vfe_dev->buf_mgr->ops->buf_divert(
+					vfe_dev->buf_mgr, done_buf->bufq_handle,
+					done_buf->buf_idx, &ts->buf_time,
+					vfe_dev->axi_data.
+					src_info[VFE_PIX_0].frame_id);
+				if (rc != 0)
+					continue;
+
+				stats_event->stats_buf_idxs
+					[stream_info->stats_type] =
+					done_buf->buf_idx;
+				if (!stream_info->composite_flag) {
+					stats_event->stats_mask =
+						1 << stream_info->stats_type;
+					ISP_DBG("%s: stats frameid: 0x%x %d\n",
+						__func__, buf_event.frame_id,
+						stream_info->stats_type);
+					msm_isp_send_event(vfe_dev,
+						ISP_EVENT_STATS_NOTIFY +
+						stream_info->stats_type,
+						&buf_event);
+				} else {
+					comp_stats_type_mask |=
+						1 << stream_info->stats_type;
+				}
+			}
+		}
+
+		if (comp_stats_type_mask) {
+			ISP_DBG("%s: comp_stats frameid: 0x%x, 0x%x\n",
+				__func__, buf_event.frame_id,
+				comp_stats_type_mask);
+			stats_event->stats_mask = comp_stats_type_mask;
+			msm_isp_send_event(vfe_dev,
+				ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
+			comp_stats_type_mask = 0;
+		}
+	}
+}
+
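+/*
+ * Reserve the stats stream slot for the requested stats type, validate the
+ * framedrop and irq-subsample patterns, copy the request parameters and
+ * return a stream handle built from a rolling count and the stats index.
+ */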
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+{
+	int rc = -1;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	uint32_t stats_idx;
+
+	if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
+		(1 << stream_req_cmd->stats_type))) {
+		pr_err("%s: Stats type not supported\n", __func__);
+		return rc;
+	}
+
+	stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+		get_stats_idx(stream_req_cmd->stats_type);
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
+	if (stream_info->state != STATS_AVAILABLE) {
+		pr_err("%s: Stats already requested\n", __func__);
+		return rc;
+	}
+
+	if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid framedrop pattern\n", __func__);
+		return rc;
+	}
+
+	if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+		pr_err("%s: Invalid irq subsample pattern\n", __func__);
+		return rc;
+	}
+
+	stream_info->session_id = stream_req_cmd->session_id;
+	stream_info->stream_id = stream_req_cmd->stream_id;
+	stream_info->composite_flag = stream_req_cmd->composite_flag;
+	stream_info->stats_type = stream_req_cmd->stats_type;
+	stream_info->buffer_offset = stream_req_cmd->buffer_offset;
+	stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
+	stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
+	stream_info->irq_subsample_pattern =
+		stream_req_cmd->irq_subsample_pattern;
+	stream_info->state = STATS_INACTIVE;
+
+	if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
+		vfe_dev->stats_data.stream_handle_cnt++;
+
+	stream_req_cmd->stream_handle =
+		(++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
+
+	stream_info->stream_handle = stream_req_cmd->stream_handle;
+	return 0;
+}
+
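+/*
+ * Create a stats stream and program its framedrop period; non-composite
+ * streams get their WM irq mask configured here, and the WM registers are
+ * written immediately when no initial frame drop is requested.
+ */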
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = -1;
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	uint32_t framedrop_period;
+	uint32_t stats_idx;
+
+	rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
+	if (rc < 0) {
+		pr_err("%s: create stream failed\n", __func__);
+		return rc;
+	}
+
+	stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
+
+	framedrop_period = msm_isp_get_framedrop_period(
+	   stream_req_cmd->framedrop_pattern);
+
+	if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
+		stream_info->framedrop_pattern = 0x0;
+	else
+		stream_info->framedrop_pattern = 0x1;
+	stream_info->framedrop_period = framedrop_period - 1;
+
+	if (!stream_info->composite_flag)
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			cfg_wm_irq_mask(vfe_dev, stream_info);
+
+	if (stream_info->init_stats_frame_drop == 0)
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+			stream_info);
+
+	return rc;
+}
+
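+/*
+ * Release a stats stream: stop it first if it is still running, clear its
+ * WM irq mask and registers, and wipe the stream slot so it can be
+ * requested again.
+ */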
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = -1;
+	struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+	struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
+	struct msm_vfe_stats_stream *stream_info = NULL;
+
+	if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s Invalid stats index %d", __func__, stats_idx);
+		return -EINVAL;
+	}
+
+	stream_info = &stats_data->stream_info[stats_idx];
+	if (stream_info->state == STATS_AVAILABLE) {
+		pr_err("%s: stream already release\n", __func__);
+		return rc;
+	} else if (stream_info->state != STATS_INACTIVE) {
+		stream_cfg_cmd.enable = 0;
+		stream_cfg_cmd.num_streams = 1;
+		stream_cfg_cmd.stream_handle[0] =
+			stream_release_cmd->stream_handle;
+		rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+	}
+
+	if (!stream_info->composite_flag)
+		vfe_dev->hw_info->vfe_ops.stats_ops.
+			clear_wm_irq_mask(vfe_dev, stream_info);
+
+	vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+	memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
+	return 0;
+}
+
+static int msm_isp_init_stats_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info)
+{
+	int rc = 0;
+
+	stream_info->bufq_handle =
+		vfe_dev->buf_mgr->ops->get_bufq_handle(
+		vfe_dev->buf_mgr, stream_info->session_id,
+		stream_info->stream_id);
+	if (stream_info->bufq_handle == 0) {
+		pr_err("%s: no buf configured for stream: 0x%x\n",
+			__func__, stream_info->stream_handle);
+		return -EINVAL;
+	}
+
+	rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PING_FLAG, NULL);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for ping\n", __func__);
+		return rc;
+	}
+	rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+		stream_info, VFE_PONG_FLAG, NULL);
+	if (rc < 0) {
+		pr_err("%s: No free buffer for pong\n", __func__);
+		return rc;
+	}
+	return rc;
+}
+
+static void msm_isp_deinit_stats_ping_pong_reg(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream *stream_info)
+{
+	int i;
+	struct msm_isp_buffer *buf;
+
+	for (i = 0; i < 2; i++) {
+		buf = stream_info->buf[i];
+		if (buf)
+			vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+				buf->bufq_handle, buf->buf_idx);
+	}
+}
+
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+{
+	int i;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	struct msm_vfe_stats_stream *stream_info = NULL;
+
+	for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+		stream_info = &stats_data->stream_info[i];
+		if (stream_info->state != STATS_ACTIVE)
+			continue;
+
+		if (stream_info->init_stats_frame_drop) {
+			stream_info->init_stats_frame_drop--;
+			if (stream_info->init_stats_frame_drop == 0) {
+				vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+					vfe_dev, stream_info);
+			}
+		}
+	}
+}
+
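+/*
+ * Reg-update handler for stats streams: START/STOP_PENDING streams move to
+ * STARTING/STOPPING while the module and composite masks are toggled,
+ * STARTING/STOPPING streams settle into ACTIVE/INACTIVE, and the
+ * config-done completion is signalled once the update count drains to zero.
+ */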
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint32_t stats_mask = 0, comp_stats_mask = 0;
+	uint32_t enable = 0;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+		if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+				stats_data->stream_info[i].state ==
+					STATS_STOP_PENDING) {
+			stats_mask |= i;
+			enable = stats_data->stream_info[i].state ==
+				STATS_START_PENDING ? 1 : 0;
+			stats_data->stream_info[i].state =
+				stats_data->stream_info[i].state ==
+				STATS_START_PENDING ?
+				STATS_STARTING : STATS_STOPPING;
+			vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+				vfe_dev, BIT(i), enable);
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			   vfe_dev, BIT(i), enable);
+		} else if (stats_data->stream_info[i].state == STATS_STARTING ||
+			stats_data->stream_info[i].state == STATS_STOPPING) {
+			if (stats_data->stream_info[i].composite_flag)
+				comp_stats_mask |= i;
+			stats_data->stream_info[i].state =
+				stats_data->stream_info[i].state ==
+				STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+		}
+	}
+	atomic_sub(1, &stats_data->stats_update);
+	if (!atomic_read(&stats_data->stats_update))
+		complete(&vfe_dev->stats_config_complete);
+}
+
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+	int rc;
+
+	init_completion(&vfe_dev->stats_config_complete);
+	atomic_set(&vfe_dev->stats_data.stats_update, 2);
+	rc = wait_for_completion_timeout(
+		&vfe_dev->stats_config_complete,
+		msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+	if (rc == 0) {
+		pr_err("%s: wait timeout\n", __func__);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
+static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	uint32_t stats_mask = 0, idx;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+		stats_mask |= 1 << idx;
+	}
+
+	if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+		vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+	}
+	return 0;
+}
+
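+/*
+ * Start the requested stats streams: initialise their ping/pong buffers,
+ * mark them ACTIVE (or START_PENDING when the PIX interface is streaming,
+ * in which case the function waits for the reg-update driven config-done)
+ * and enable the stats modules and composite masks.
+ */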
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, idx;
+	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+	uint32_t num_stats_comp_mask = 0;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+	rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
+		stats_data->stream_info);
+	if (rc < 0)
+		return rc;
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+
+		if (stream_info->composite_flag > num_stats_comp_mask) {
+			pr_err("%s: comp grp %d exceed max %d\n",
+				__func__, stream_info->composite_flag,
+				num_stats_comp_mask);
+			return -EINVAL;
+		}
+		rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+		if (rc < 0) {
+			pr_err("%s: No buffer for stream%d type:%d stmID:0x%x\n",
+				__func__, idx, stream_info->stats_type,
+				stream_info->stream_id);
+			return rc;
+		}
+
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_START_PENDING;
+		else
+			stream_info->state = STATS_ACTIVE;
+
+		stats_data->num_active_stream++;
+		stats_mask |= 1 << idx;
+
+		if (stream_info->composite_flag > 0)
+			comp_stats_mask[stream_info->composite_flag-1] |=
+				1 << idx;
+
+		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+			__func__, comp_stats_mask[0],
+			comp_stats_mask[1],
+			stats_data->num_active_stream);
+
+	}
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		for (i = 0; i < num_stats_comp_mask; i++) {
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			 vfe_dev, comp_stats_mask[i], 1);
+		}
+	}
+	return rc;
+}
+
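+/*
+ * Stop the requested stats streams: mirror of the start path, marking the
+ * streams STOP_PENDING (waiting for config-done) or INACTIVE, disabling
+ * the stats modules and composite masks, and finally releasing each
+ * stream's ping/pong buffers.
+ */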
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, idx;
+	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+	uint32_t num_stats_comp_mask = 0;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+
+		if (stream_info->composite_flag > num_stats_comp_mask) {
+			pr_err("%s: comp grp %d exceed max %d\n",
+				__func__, stream_info->composite_flag,
+				num_stats_comp_mask);
+			return -EINVAL;
+		}
+
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_STOP_PENDING;
+		else
+			stream_info->state = STATS_INACTIVE;
+
+		stats_data->num_active_stream--;
+		stats_mask |= 1 << idx;
+
+		if (stream_info->composite_flag > 0)
+			comp_stats_mask[stream_info->composite_flag-1] |=
+				1 << idx;
+
+		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+			__func__, comp_stats_mask[0],
+			comp_stats_mask[1],
+			stats_data->num_active_stream);
+	}
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		for (i = 0; i < num_stats_comp_mask; i++) {
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+			   vfe_dev, comp_stats_mask[i], 0);
+		}
+	}
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		msm_isp_deinit_stats_ping_pong_reg(vfe_dev, stream_info);
+	}
+	return rc;
+}
+
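+/*
+ * Stats stream start/stop entry point: programs the stats UB when no
+ * stream is active, validates the stream count and dispatches to the
+ * start or stop helper with the CGC override toggled accordingly.
+ */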
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+
+	if (vfe_dev->stats_data.num_active_stream == 0)
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	if (stream_cfg_cmd->enable) {
+		msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+
+		rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+	} else {
+		rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
+
+		msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+	}
+
+	return rc;
+}
+
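+/*
+ * Update already-created stats streams; currently only the framedrop
+ * pattern can be changed, and the new period is written to the WM
+ * registers once any initial frame drop has elapsed.
+ */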
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+	struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
+
+	/* validate the request */
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		/* check array reference bounds */
+		if (STATS_IDX(update_info->stream_handle)
+			> vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: stats idx %d out of bound!", __func__,
+				STATS_IDX(update_info->stream_handle));
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		stream_info = &stats_data->stream_info[
+				STATS_IDX(update_info->stream_handle)];
+		if (stream_info->stream_handle !=
+			update_info->stream_handle) {
+			pr_err("%s: stats stream handle %x %x mismatch!\n",
+				__func__, stream_info->stream_handle,
+				update_info->stream_handle);
+			continue;
+		}
+
+		switch (update_cmd->update_type) {
+		case UPDATE_STREAM_STATS_FRAMEDROP_PATTERN: {
+			uint32_t framedrop_period =
+				msm_isp_get_framedrop_period(
+				   update_info->skip_pattern);
+			if (update_info->skip_pattern == SKIP_ALL)
+				stream_info->framedrop_pattern = 0x0;
+			else
+				stream_info->framedrop_pattern = 0x1;
+			stream_info->framedrop_period = framedrop_period - 1;
+			if (stream_info->init_stats_frame_drop == 0)
+				vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+					vfe_dev, stream_info);
+			break;
+		}
+
+		default:
+			pr_err("%s: Invalid update type\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h
new file mode 100644
index 0000000..11c0c28
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_STATS_UTIL_H__
+#define __MSM_ISP_STATS_UTIL_H__
+
+#include "msm_isp_32.h"
+#define STATS_IDX(idx) (idx & 0xFF)
+
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts);
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd);
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
+#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index b589afa..e671122 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -410,8 +410,12 @@
 			pr_err("%s: Fetch engine config failed\n", __func__);
 			return -EINVAL;
 		}
-		for (i = 0; i < stream_info->num_planes; i++)
+		for (i = 0; i < stream_info->num_planes; i++) {
+			vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
+				vfe_dev->vfe_base,
+				stream_info->wm[vfe_idx][i], 1);
 			wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
+		}
 		vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
 			VFE_SRC_MAX);
 		vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c
new file mode 100644
index 0000000..f6ca41c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c
@@ -0,0 +1,1939 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
+
+#include "msm.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+
+#ifndef UINT16_MAX
+#define UINT16_MAX             (65535U)
+#endif
+
+#define MAX_ISP_V4l2_EVENTS 100
+#define MAX_ISP_REG_LIST 100
+static DEFINE_MUTEX(bandwidth_mgr_mutex);
+static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+
+static uint64_t msm_isp_cpp_clk_rate;
+
+#define VFE40_8974V2_VERSION 0x1001001A
+static struct msm_bus_vectors msm_isp_init_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+static struct msm_bus_vectors msm_isp_ping_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = MSM_ISP_MIN_AB,
+		.ib  = MSM_ISP_MIN_IB,
+	},
+};
+
+static struct msm_bus_vectors msm_isp_pong_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = MSM_ISP_MIN_AB,
+		.ib  = MSM_ISP_MIN_IB,
+	},
+};
+
+static struct msm_bus_paths msm_isp_bus_client_config[] = {
+	{
+		ARRAY_SIZE(msm_isp_init_vectors),
+		msm_isp_init_vectors,
+	},
+	{
+		ARRAY_SIZE(msm_isp_ping_vectors),
+		msm_isp_ping_vectors,
+	},
+	{
+		ARRAY_SIZE(msm_isp_pong_vectors),
+		msm_isp_pong_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
+	msm_isp_bus_client_config,
+	NULL,
+	ARRAY_SIZE(msm_isp_bus_client_config),
+	.name = "msm_camera_isp",
+	0,
+};
+
+
+void msm_camera_io_dump_2(void __iomem *addr, int size)
+{
+	char line_str[128], *p_str;
+	int i;
+	u32 __iomem *p = (u32 __iomem *) addr;
+	u32 data;
+
+	pr_err("%s: %pK %d\n", __func__, addr, size);
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+#ifdef CONFIG_COMPAT
+			snprintf(p_str, 20, "%016lx: ", (unsigned long) p);
+			p_str += 18;
+#else
+			snprintf(p_str, 12, "%08lx: ", (unsigned long) p);
+			p_str += 10;
+#endif
+		}
+		data = readl_relaxed(p++);
+		snprintf(p_str, 12, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % 4 == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+}
+
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
+{
+	int i;
+	char text[5];
+
+	text[4] = '\0';
+	for (i = 0; i < 4; i++) {
+		text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
+		if ((text[i] < '0') || (text[i] > 'z')) {
+			pr_err("%s: Invalid output format %d (unprintable)\n",
+				origin, fourcc_format);
+			return;
+		}
+	}
+	pr_err("%s: Invalid output format %s\n",
+		origin, text);
+}
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+	int rc = 0;
+
+	mutex_lock(&bandwidth_mgr_mutex);
+	isp_bandwidth_mgr.client_info[client].active = 1;
+	if (isp_bandwidth_mgr.use_count++) {
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return rc;
+	}
+	isp_bandwidth_mgr.bus_client =
+		msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s: client register failed\n", __func__);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return -EINVAL;
+	}
+
+	isp_bandwidth_mgr.bus_vector_active_idx = 1;
+	msm_bus_scale_client_update_request(
+	   isp_bandwidth_mgr.bus_client,
+	   isp_bandwidth_mgr.bus_vector_active_idx);
+
+	mutex_unlock(&bandwidth_mgr_mutex);
+	return 0;
+}
+
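+/*
+ * Recompute the total ISP bus bandwidth vote: sum the ab/ib requests of
+ * all active clients into the alternate bus vector, submit it through the
+ * bus-scale client and record the request in the history buffer.
+ */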
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+	uint64_t ab, uint64_t ib)
+{
+	int i;
+	struct msm_bus_paths *path;
+
+	mutex_lock(&bandwidth_mgr_mutex);
+	if (!isp_bandwidth_mgr.use_count ||
+		!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
+			__func__, isp_bandwidth_mgr.use_count,
+			isp_bandwidth_mgr.bus_client);
+		return -EINVAL;
+	}
+
+	isp_bandwidth_mgr.client_info[client].ab = ab;
+	isp_bandwidth_mgr.client_info[client].ib = ib;
+	ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
+	path =
+		&(msm_isp_bus_client_pdata.usecase[
+		  isp_bandwidth_mgr.bus_vector_active_idx]);
+	path->vectors[0].ab = 0;
+	path->vectors[0].ib = 0;
+	for (i = 0; i < MAX_ISP_CLIENT; i++) {
+		if (isp_bandwidth_mgr.client_info[i].active) {
+			path->vectors[0].ab +=
+				isp_bandwidth_mgr.client_info[i].ab;
+			path->vectors[0].ib +=
+				isp_bandwidth_mgr.client_info[i].ib;
+		}
+	}
+	ISP_DBG("%s: Total AB = %llu IB = %llu\n", __func__,
+			path->vectors[0].ab, path->vectors[0].ib);
+	msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client,
+		isp_bandwidth_mgr.bus_vector_active_idx);
+	/* Insert into circular buffer */
+	msm_isp_update_req_history(isp_bandwidth_mgr.bus_client,
+		path->vectors[0].ab,
+		path->vectors[0].ib,
+		isp_bandwidth_mgr.client_info,
+		sched_clock());
+	mutex_unlock(&bandwidth_mgr_mutex);
+	return 0;
+}
+
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+	if (client >= MAX_ISP_CLIENT) {
+		pr_err("invalid Client id %d", client);
+		return;
+	}
+	mutex_lock(&bandwidth_mgr_mutex);
+	memset(&isp_bandwidth_mgr.client_info[client], 0,
+		   sizeof(struct msm_isp_bandwidth_info));
+	if (--isp_bandwidth_mgr.use_count) {
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return;
+	}
+
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return;
+	}
+
+	msm_bus_scale_client_update_request(
+	   isp_bandwidth_mgr.bus_client, 0);
+	msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client);
+	isp_bandwidth_mgr.bus_client = 0;
+	mutex_unlock(&bandwidth_mgr_mutex);
+}
+
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+				      struct msm_isp_statistics *stats)
+{
+	stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
+	stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
+	stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
+
+	stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
+	stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
+	stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
+
+	stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
+	stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
+	stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
+	stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
+	stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
+	stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
+	stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
+}
+
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev)
+{
+	struct msm_bus_paths *path;
+
+	path = &(msm_isp_bus_client_pdata.usecase[
+		  isp_bandwidth_mgr.bus_vector_active_idx]);
+	vfe_dev->msm_isp_last_overflow_ab = path->vectors[0].ab;
+	vfe_dev->msm_isp_last_overflow_ib = path->vectors[0].ib;
+}
+
+void msm_isp_util_update_clk_rate(long clock_rate)
+{
+	msm_isp_cpp_clk_rate = clock_rate;
+}
+
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+	switch (frame_skip_pattern) {
+	case NO_SKIP:
+	case EVERY_2FRAME:
+	case EVERY_3FRAME:
+	case EVERY_4FRAME:
+	case EVERY_5FRAME:
+	case EVERY_6FRAME:
+	case EVERY_7FRAME:
+	case EVERY_8FRAME:
+		return frame_skip_pattern + 1;
+	case EVERY_16FRAME:
+		return 16;
+	case EVERY_32FRAME:
+		return 32;
+	case SKIP_ALL:
+		return 1;
+	default:
+		return 1;
+	}
+	return 1;
+}
+
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+	struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info)
+{
+	int i, count, rc;
+	uint32_t rates[VFE_CLK_INFO_MAX];
+
+	struct device_node *of_node;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	ISP_DBG("count = %d\n", count);
+	if (count <= 0) {
+		pr_err("no clocks found in device tree, count=%d", count);
+		return 0;
+	}
+
+	if (count > VFE_CLK_INFO_MAX) {
+		pr_err("invalid count=%d, max is %d\n", count,
+			VFE_CLK_INFO_MAX);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(vfe_clk_info[i].clk_name));
+		ISP_DBG("clock-names[%d] = %s\n", i, vfe_clk_info[i].clk_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			return rc;
+		}
+	}
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+	for (i = 0; i < count; i++) {
+		vfe_clk_info[i].clk_rate =
+			(rates[i] == 0) ? (long)-1 : rates[i];
+		ISP_DBG("clk_rate[%d] = %ld\n", i, vfe_clk_info[i].clk_rate);
+	}
+	vfe_dev->num_clk = count;
+	return 0;
+}
+
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+	struct vfe_device *vfe_dev)
+{
+	struct timespec ts;
+
+	do_gettimeofday(&(time_stamp->event_time));
+	if (vfe_dev->vt_enable) {
+		msm_isp_get_avtimer_ts(time_stamp);
+		time_stamp->buf_time.tv_sec    = time_stamp->vt_time.tv_sec;
+		time_stamp->buf_time.tv_usec   = time_stamp->vt_time.tv_usec;
+	} else {
+		get_monotonic_boottime(&ts);
+		time_stamp->buf_time.tv_sec    = ts.tv_sec;
+		time_stamp->buf_time.tv_usec   = ts.tv_nsec/1000;
+	}
+}
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	rc = v4l2_event_subscribe(fh, sub, MAX_ISP_V4l2_EVENTS, NULL);
+	if (rc == 0) {
+		if (sub->type == V4L2_EVENT_ALL) {
+			int i;
+
+			vfe_dev->axi_data.event_mask = 0;
+			for (i = 0; i < ISP_EVENT_MAX; i++)
+				vfe_dev->axi_data.event_mask |= (1 << i);
+		} else {
+			int event_idx = sub->type - ISP_EVENT_BASE;
+
+			vfe_dev->axi_data.event_mask |= (1 << event_idx);
+		}
+	}
+	return rc;
+}
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	rc = v4l2_event_unsubscribe(fh, sub);
+	if (sub->type == V4L2_EVENT_ALL) {
+		vfe_dev->axi_data.event_mask = 0;
+	} else {
+		int event_idx = sub->type - ISP_EVENT_BASE;
+
+		vfe_dev->axi_data.event_mask &= ~(1 << event_idx);
+	}
+	return rc;
+}
+
+static int msm_isp_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+	int           clk_idx = 0;
+	unsigned long max_value = ~0;
+	long          round_rate = 0;
+
+	if (!vfe_dev || !rate) {
+		pr_err("%s:%d failed: vfe_dev %pK rate %pK\n",
+			__func__, __LINE__,	vfe_dev, rate);
+		return -EINVAL;
+	}
+
+	*rate = 0;
+	if (!vfe_dev->hw_info) {
+		pr_err("%s:%d failed: vfe_dev->hw_info %pK\n", __func__,
+			__LINE__, vfe_dev->hw_info);
+		return -EINVAL;
+	}
+
+	clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+	if (clk_idx >= vfe_dev->num_clk) {
+		pr_err("%s:%d failed: clk_idx %d max array size %d\n",
+			__func__, __LINE__, clk_idx,
+			vfe_dev->num_clk);
+		return -EINVAL;
+	}
+
+	round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
+	if (round_rate < 0) {
+		pr_err("%s: Invalid vfe clock rate\n", __func__);
+		return -EINVAL;
+	}
+
+	*rate = round_rate;
+	return 0;
+}
+
+static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+	int rc = 0;
+	int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+	long round_rate =
+		clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
+	if (round_rate < 0) {
+		pr_err("%s: Invalid vfe clock rate\n", __func__);
+		return round_rate;
+	}
+
+	rc = clk_set_rate(vfe_dev->vfe_clk[clk_idx], round_rate);
+	if (rc < 0) {
+		pr_err("%s: Vfe set rate error\n", __func__);
+		return rc;
+	}
+	*rate = round_rate;
+	vfe_dev->msm_isp_vfe_clk_rate = round_rate;
+	return 0;
+}
+
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+	struct msm_vfe_fetch_engine_info *fetch_engine_info)
+{
+	struct msm_isp32_event_data fe_rd_done_event;
+
+	if (!fetch_engine_info->is_busy)
+		return;
+	memset(&fe_rd_done_event, 0, sizeof(struct msm_isp32_event_data));
+	fe_rd_done_event.frame_id =
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+	fe_rd_done_event.u.buf_done.session_id = fetch_engine_info->session_id;
+	fe_rd_done_event.u.buf_done.stream_id = fetch_engine_info->stream_id;
+	fe_rd_done_event.u.buf_done.handle = fetch_engine_info->bufq_handle;
+	fe_rd_done_event.u.buf_done.buf_idx = fetch_engine_info->buf_idx;
+	ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
+		__func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
+	fetch_engine_info->is_busy = 0;
+	msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
+}
+
+static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
+	struct msm_vfe_input_cfg *input_cfg)
+{
+	int rc = 0;
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		pr_err("%s: src %d path is active\n", __func__, VFE_PIX_0);
+		return -EINVAL;
+	}
+
+	vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
+		input_cfg->input_pix_clk;
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+		input_cfg->d.pix_cfg.input_mux;
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+		input_cfg->d.pix_cfg.input_format;
+
+	rc = msm_isp_set_clk_rate(vfe_dev,
+		&vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock);
+	if (rc < 0) {
+		pr_err("%s: clock set rate failed\n", __func__);
+		return rc;
+	}
+
+	ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
+		input_cfg->d.pix_cfg.input_mux, CAMIF,
+		input_cfg->d.pix_cfg.input_format);
+
+	if (input_cfg->d.pix_cfg.input_mux == CAMIF) {
+		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+			input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
+	} else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
+		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+			input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
+	}
+	vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
+			vfe_dev, &input_cfg->d.pix_cfg);
+	return rc;
+}
+
+static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
+	struct msm_vfe_input_cfg *input_cfg)
+{
+	int rc = 0;
+
+	if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
+		pr_err("%s: RAW%d path is active\n", __func__,
+			   input_cfg->input_src - VFE_RAW_0);
+		return -EINVAL;
+	}
+
+	vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
+		input_cfg->input_pix_clk;
+	vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
+		vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
+	return rc;
+}
+
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_input_cfg *input_cfg = arg;
+
+	switch (input_cfg->input_src) {
+	case VFE_PIX_0:
+		rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
+		break;
+	case VFE_RAW_0:
+	case VFE_RAW_1:
+	case VFE_RAW_2:
+		rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
+		break;
+	default:
+		pr_err("%s: Invalid input source\n", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
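+/*
+ * Process a userspace-supplied linked list of register config commands,
+ * copying each node from user memory and bailing out once MAX_ISP_REG_LIST
+ * entries have been handled to bound the walk.
+ */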
+static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	uint32_t count = 0;
+	struct msm_vfe_cfg_cmd_list *proc_cmd =
+		(struct msm_vfe_cfg_cmd_list *)arg;
+	struct msm_vfe_cfg_cmd_list cmd, cmd_next;
+	struct msm_vfe_cfg_cmd2 cfg_cmd;
+
+	if (!vfe_dev || !arg) {
+		pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+			vfe_dev, arg);
+		return -EINVAL;
+	}
+
+	rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
+	if (rc < 0)
+		pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+	cmd = *proc_cmd;
+
+	while (cmd.next) {
+		if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
+			pr_err("%s:%d failed: next size %u != expected %zu\n",
+				__func__, __LINE__, cmd.next_size,
+				sizeof(struct msm_vfe_cfg_cmd_list));
+			break;
+		}
+		if (++count >= MAX_ISP_REG_LIST) {
+			pr_err("%s:%d Error exceeding the max register count:%u\n",
+				__func__, __LINE__, count);
+			rc = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&cmd_next, (void __user *)cmd.next,
+			sizeof(struct msm_vfe_cfg_cmd_list))) {
+			rc = -EFAULT;
+			continue;
+		}
+
+		cfg_cmd = cmd_next.cfg_cmd;
+
+		rc = msm_isp_proc_cmd(vfe_dev, &cfg_cmd);
+		if (rc < 0)
+			pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+		cmd = cmd_next;
+	}
+	return rc;
+}
+
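+/*
+ * 32-bit compat variants of the register-config structures: user-space
+ * pointers are carried as compat_caddr_t and converted with compat_ptr()
+ * before the common helpers are reused.
+ */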
+#ifdef CONFIG_COMPAT
+struct msm_vfe_cfg_cmd2_32 {
+	uint16_t num_cfg;
+	uint16_t cmd_len;
+	compat_caddr_t cfg_data;
+	compat_caddr_t cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list_32 {
+	struct msm_vfe_cfg_cmd2_32   cfg_cmd;
+	compat_caddr_t               next;
+	uint32_t                     next_size;
+};
+
+#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
+#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
+
+static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
+	struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
+{
+	proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
+	proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
+	proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
+	proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
+}
+
+static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	uint32_t count = 0;
+	struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
+		(struct msm_vfe_cfg_cmd_list_32 *)arg;
+	struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
+	struct msm_vfe_cfg_cmd2 current_cmd;
+
+	if (!vfe_dev || !arg) {
+		pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+			vfe_dev, arg);
+		return -EINVAL;
+	}
+	msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
+	rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+	if (rc < 0)
+		pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+	cmd = *proc_cmd;
+
+	while (compat_ptr(cmd.next) != NULL) {
+		if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
+			pr_err("%s:%d failed: next size %u != expected %zu\n",
+				__func__, __LINE__, cmd.next_size,
+				sizeof(struct msm_vfe_cfg_cmd_list));
+			break;
+		}
+		if (++count >= MAX_ISP_REG_LIST) {
+			pr_err("%s:%d Error exceeding the max register count:%u\n",
+				__func__, __LINE__, count);
+			rc = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
+			sizeof(struct msm_vfe_cfg_cmd_list_32))) {
+			rc = -EFAULT;
+			continue;
+		}
+
+		msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
+		rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+		if (rc < 0)
+			pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+		cmd = cmd_next;
+	}
+	return rc;
+}
+
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+	if (is_compat_task())
+		return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
+	else
+		return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#else /* CONFIG_COMPAT */
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+	return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+
+static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	long rc = 0;
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+	if (!vfe_dev || !vfe_dev->vfe_base) {
+		pr_err("%s:%d failed: invalid params %pK\n",
+			__func__, __LINE__, vfe_dev);
+		if (vfe_dev)
+			pr_err("%s:%d failed %pK\n", __func__,
+				__LINE__, vfe_dev->vfe_base);
+		return -EINVAL;
+	}
+
+	/* Use the real-time mutex for hard real-time ioctls such as
+	 * buffer operations and register updates.
+	 * Use the core mutex for other ioctls that can take longer to
+	 * complete, such as starting/stopping ISP streams, which block
+	 * until the hardware starts/stops streaming.
+	 */
+	ISP_DBG("%s cmd: %d\n", __func__, _IOC_TYPE(cmd));
+	switch (cmd) {
+	case VIDIOC_MSM_VFE_REG_CFG: {
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_VFE_REG_LIST_CFG: {
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_ISP_REQUEST_BUF:
+	case VIDIOC_MSM_ISP_ENQUEUE_BUF:
+	case VIDIOC_MSM_ISP_RELEASE_BUF: {
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	}
+	case VIDIOC_MSM_ISP32_REQUEST_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_request_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_RELEASE_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_release_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_CFG_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_AXI_HALT:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_axi_halt(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_AXI_RESET:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_axi_reset(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_AXI_RESTART:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_axi_restart(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_INPUT_CFG:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_cfg_input(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_FETCH_ENG_START:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = vfe_dev->hw_info->vfe_ops.core_ops.
+			start_fetch_eng(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
+		if (arg) {
+			enum msm_vfe_input_src frame_src =
+				*((enum msm_vfe_input_src *)arg);
+			vfe_dev->hw_info->vfe_ops.core_ops.
+				reg_update(vfe_dev, (1 << frame_src));
+			vfe_dev->axi_data.src_info[frame_src].last_updt_frm_id =
+			  vfe_dev->axi_data.src_info[frame_src].frame_id;
+		}
+		break;
+	case VIDIOC_MSM_ISP_SET_SRC_STATE:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_set_src_state(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_request_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_release_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_update_stats_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_UPDATE_STREAM:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_update_axi_stream(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case VIDIOC_MSM_ISP_SMMU_ATTACH:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case MSM_SD_NOTIFY_FREEZE:
+		vfe_dev->isp_sof_debug = 0;
+		break;
+	case VIDIOC_MSM_ISP_BUF_DONE:
+		mutex_lock(&vfe_dev->core_mutex);
+		rc = msm_isp_user_buf_done(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->core_mutex);
+		break;
+	case MSM_SD_SHUTDOWN:
+		while (vfe_dev->vfe_open_cnt != 0)
+			msm_isp_close_node(sd, NULL);
+		break;
+
+	default:
+		pr_err_ratelimited("%s: Invalid ISP command\n", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+
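+/*
+ * The compat ioctl path translates the 32-bit REG_CFG payloads in place and
+ * falls through to msm_isp_ioctl_unlocked() for every other command.
+ */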
+#ifdef CONFIG_COMPAT
+static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	long rc = 0;
+
+	if (!vfe_dev || !vfe_dev->vfe_base) {
+		pr_err("%s:%d failed: invalid params %pK\n",
+			__func__, __LINE__, vfe_dev);
+		if (vfe_dev)
+			pr_err("%s:%d failed %pK\n", __func__,
+				__LINE__, vfe_dev->vfe_base);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
+		struct msm_vfe_cfg_cmd2 proc_cmd;
+
+		mutex_lock(&vfe_dev->realtime_mutex);
+		msm_isp_compat_to_proc_cmd(&proc_cmd,
+			(struct msm_vfe_cfg_cmd2_32 *) arg);
+		rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	default:
+		return msm_isp_ioctl_unlocked(sd, cmd, arg);
+	}
+
+	return rc;
+}
+
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	return msm_isp_ioctl_compat(sd, cmd, arg);
+}
+#else /* CONFIG_COMPAT */
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	return msm_isp_ioctl_unlocked(sd, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
+	struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
+	uint32_t *cfg_data, uint32_t cmd_len)
+{
+	if (!vfe_dev || !reg_cfg_cmd) {
+		pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
+			__LINE__, vfe_dev, reg_cfg_cmd);
+		return -EINVAL;
+	}
+	if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
+		(!cfg_data || !cmd_len)) {
+		pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
+			__func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
+			cmd_len);
+		return -EINVAL;
+	}
+
+	/* Validate input parameters */
+	switch (reg_cfg_cmd->cmd_type) {
+	case VFE_WRITE:
+	case VFE_READ:
+	case VFE_WRITE_MB: {
+		if ((reg_cfg_cmd->u.rw_info.reg_offset >
+			(UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+			((reg_cfg_cmd->u.rw_info.reg_offset +
+			reg_cfg_cmd->u.rw_info.len) >
+			resource_size(vfe_dev->vfe_mem)) ||
+			(reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
+			pr_err("%s:%d reg_offset %d len %d res %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.rw_info.reg_offset,
+				reg_cfg_cmd->u.rw_info.len,
+				(uint32_t)resource_size(vfe_dev->vfe_mem));
+			return -EINVAL;
+		}
+
+		if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
+			(UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+			((reg_cfg_cmd->u.rw_info.cmd_data_offset +
+			reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
+			pr_err("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.rw_info.cmd_data_offset,
+				reg_cfg_cmd->u.rw_info.len, cmd_len);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	case VFE_WRITE_DMI_16BIT:
+	case VFE_WRITE_DMI_32BIT:
+	case VFE_WRITE_DMI_64BIT:
+	case VFE_READ_DMI_16BIT:
+	case VFE_READ_DMI_32BIT:
+	case VFE_READ_DMI_64BIT: {
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
+			reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+			if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
+				(reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
+				(sizeof(uint32_t)))) {
+				pr_err("%s:%d hi %d lo %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+					reg_cfg_cmd->u.dmi_info.lo_tbl_offset);
+				return -EINVAL;
+			}
+			if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
+				pr_err("%s:%d len %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.len);
+				return -EINVAL;
+			}
+			if (((UINT_MAX -
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
+				(reg_cfg_cmd->u.dmi_info.len -
+				sizeof(uint32_t))) ||
+				((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
+				reg_cfg_cmd->u.dmi_info.len -
+				sizeof(uint32_t)) > cmd_len)) {
+				pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+					reg_cfg_cmd->u.dmi_info.len, cmd_len);
+				return -EINVAL;
+			}
+		}
+		if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
+			(UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
+			((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
+			reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
+			pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
+				reg_cfg_cmd->u.dmi_info.len, cmd_len);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	default:
+		break;
+	}
+
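+	/* Bounds checked above for register and DMI accesses; now execute. */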
+	switch (reg_cfg_cmd->cmd_type) {
+	case VFE_WRITE: {
+		msm_camera_io_memcpy(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.rw_info.reg_offset,
+			cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+			reg_cfg_cmd->u.rw_info.len);
+		break;
+	}
+	case VFE_WRITE_MB: {
+		msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.rw_info.reg_offset,
+			cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+			reg_cfg_cmd->u.rw_info.len);
+		break;
+	}
+	case VFE_CFG_MASK: {
+		uint32_t temp;
+
+		if ((UINT_MAX - sizeof(temp) <
+			reg_cfg_cmd->u.mask_info.reg_offset) ||
+			(resource_size(vfe_dev->vfe_mem) <
+			reg_cfg_cmd->u.mask_info.reg_offset +
+			sizeof(temp)) ||
+			(reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
+			pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
+			return -EINVAL;
+		}
+		temp = msm_camera_io_r(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.mask_info.reg_offset);
+
+		temp &= ~reg_cfg_cmd->u.mask_info.mask;
+		temp |= reg_cfg_cmd->u.mask_info.val;
+		msm_camera_io_w(temp, vfe_dev->vfe_base +
+			reg_cfg_cmd->u.mask_info.reg_offset);
+		break;
+	}
+	case VFE_WRITE_DMI_16BIT:
+	case VFE_WRITE_DMI_32BIT:
+	case VFE_WRITE_DMI_64BIT: {
+		int i;
+		uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+		uint32_t hi_val, lo_val, lo_val1;
+
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
+			hi_tbl_ptr = cfg_data +
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+		}
+		lo_tbl_ptr = cfg_data +
+			reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+			reg_cfg_cmd->u.dmi_info.len =
+				reg_cfg_cmd->u.dmi_info.len / 2;
+		for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+			lo_val = *lo_tbl_ptr++;
+			if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
+				lo_val1 = lo_val & 0x0000FFFF;
+				lo_val = (lo_val & 0xFFFF0000)>>16;
+				msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+			} else if (reg_cfg_cmd->cmd_type ==
+					   VFE_WRITE_DMI_64BIT) {
+				lo_tbl_ptr++;
+				hi_val = *hi_tbl_ptr;
+				hi_tbl_ptr = hi_tbl_ptr + 2;
+				msm_camera_io_w(hi_val, vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset);
+			}
+			msm_camera_io_w(lo_val, vfe_dev->vfe_base +
+				vfe_dev->hw_info->dmi_reg_offset + 0x4);
+		}
+		break;
+	}
+	case VFE_READ_DMI_16BIT:
+	case VFE_READ_DMI_32BIT:
+	case VFE_READ_DMI_64BIT: {
+		int i;
+		uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+		uint32_t hi_val, lo_val, lo_val1;
+
+		if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+			hi_tbl_ptr = cfg_data +
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+		}
+
+		lo_tbl_ptr = cfg_data +
+			reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+
+		if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
+			reg_cfg_cmd->u.dmi_info.len =
+				reg_cfg_cmd->u.dmi_info.len / 2;
+
+		for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+			lo_val = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+
+			if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
+				lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+				lo_val |= lo_val1 << 16;
+			}
+			*lo_tbl_ptr++ = lo_val;
+			if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+				hi_val = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset);
+				*hi_tbl_ptr = hi_val;
+				hi_tbl_ptr += 2;
+				lo_tbl_ptr++;
+			}
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_LOCK: {
+		uint32_t update_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
+		if (update_id) {
+			ISP_DBG("%s hw_update_lock fail cur_id %u,last_id %u\n",
+				__func__,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+				update_id);
+			return -EINVAL;
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_UNLOCK: {
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
+			!= *cfg_data) {
+			ISP_DBG("hw_updt over frm bound,strt_id %u end_id %d\n",
+				*cfg_data,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+		}
+		vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		break;
+	}
+	case VFE_READ: {
+		int i;
+		uint32_t *data_ptr = cfg_data +
+			reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
+		for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
+			if ((data_ptr < cfg_data) ||
+				(UINT_MAX / sizeof(*data_ptr) <
+				 (data_ptr - cfg_data)) ||
+				(sizeof(*data_ptr) * (data_ptr - cfg_data) >=
+				 cmd_len))
+				return -EINVAL;
+			*data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
+				reg_cfg_cmd->u.rw_info.reg_offset);
+			reg_cfg_cmd->u.rw_info.reg_offset += 4;
+		}
+		break;
+	}
+	case GET_MAX_CLK_RATE: {
+		int rc = 0;
+		unsigned long rate;
+
+		if (cmd_len != sizeof(__u32)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(__u32));
+			return -EINVAL;
+		}
+		rc = msm_isp_get_max_clk_rate(vfe_dev, &rate);
+		if (rc < 0) {
+			pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+			return -EINVAL;
+		}
+
+		*(__u32 *)cfg_data = (__u32)rate;
+
+		break;
+	}
+	case GET_ISP_ID: {
+		uint32_t *isp_id = NULL;
+
+		if (cmd_len < sizeof(uint32_t)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(uint32_t));
+			return -EINVAL;
+		}
+
+		isp_id = (uint32_t *)cfg_data;
+		*isp_id = vfe_dev->pdev->id;
+		break;
+	}
+	case SET_WM_UB_SIZE:
+		break;
+	case SET_UB_POLICY: {
+
+		if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(vfe_dev->vfe_ub_policy));
+			return -EINVAL;
+		}
+		vfe_dev->vfe_ub_policy = *cfg_data;
+		break;
+	}
+	default:
+		break;
+	}
+	return 0;
+}
+
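+/*
+ * msm_isp_proc_cmd - copy the array of register-config commands and the
+ * optional config-data blob from user space, apply each command through
+ * msm_isp_send_hw_cmd(), then copy the data blob back so that read-type
+ * commands can return results.
+ */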
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
+	struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
+	uint32_t *cfg_data = NULL;
+
+	if (!proc_cmd->num_cfg) {
+		pr_err("%s: Passed num_cfg as 0\n", __func__);
+		return -EINVAL;
+	}
+
+	reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
+		proc_cmd->num_cfg, GFP_KERNEL);
+	if (!reg_cfg_cmd) {
+		rc = -ENOMEM;
+		goto reg_cfg_failed;
+	}
+
+	if (copy_from_user(reg_cfg_cmd,
+		(void __user *)(proc_cmd->cfg_cmd),
+		sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
+		rc = -EFAULT;
+		goto copy_cmd_failed;
+	}
+
+	if (proc_cmd->cmd_len > 0 &&
+		proc_cmd->cmd_len < UINT16_MAX) {
+		cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
+		if (!cfg_data) {
+			pr_err("%s: cfg_data alloc failed\n", __func__);
+			rc = -ENOMEM;
+			goto cfg_data_failed;
+		}
+
+		if (copy_from_user(cfg_data,
+			(void __user *)(proc_cmd->cfg_data),
+			proc_cmd->cmd_len)) {
+			rc = -EFAULT;
+			goto copy_cmd_failed;
+		}
+	}
+
+	for (i = 0; i < proc_cmd->num_cfg; i++)
+		rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
+			cfg_data, proc_cmd->cmd_len);
+
+	if (copy_to_user(proc_cmd->cfg_data,
+			cfg_data, proc_cmd->cmd_len)) {
+		rc = -EFAULT;
+		goto copy_cmd_failed;
+	}
+
+copy_cmd_failed:
+	kfree(cfg_data);
+cfg_data_failed:
+	kfree(reg_cfg_cmd);
+reg_cfg_failed:
+	return rc;
+}
+
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+	uint32_t event_type,
+	struct msm_isp32_event_data *event_data)
+{
+	struct v4l2_event isp_event;
+
+	memset(&isp_event, 0, sizeof(struct v4l2_event));
+	isp_event.id = 0;
+	isp_event.type = event_type;
+
+	memcpy(&isp_event.u.data[0], event_data,
+		sizeof(struct msm_isp32_event_data));
+	v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
+	return 0;
+}
+
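+/* CAL_WORD(width, M, N) computes ceil(width * M / N), i.e. the number of
+ * output words per line when each pixel contributes M/N of a word.
+ */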
+#define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
+
+int msm_isp_cal_word_per_line(uint32_t output_format,
+	uint32_t pixel_per_line)
+{
+	int val = -1;
+
+	switch (output_format) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		val = CAL_WORD(pixel_per_line, 1, 8);
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+		val = CAL_WORD(pixel_per_line, 5, 32);
+		break;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+		val = CAL_WORD(pixel_per_line, 3, 16);
+		break;
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+		val = CAL_WORD(pixel_per_line, 7, 32);
+		break;
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+		val = CAL_WORD(pixel_per_line, 1, 6);
+		break;
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+		val = CAL_WORD(pixel_per_line, 1, 5);
+		break;
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		val = CAL_WORD(pixel_per_line, 1, 4);
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		val = CAL_WORD(pixel_per_line, 1, 8);
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+		val = CAL_WORD(pixel_per_line, 2, 8);
+		break;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		val = CAL_WORD(pixel_per_line, 1, 4);
+		break;
+		/* TODO: Add more image formats */
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		break;
+	}
+	return val;
+}
+
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
+{
+	switch (output_format) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+		return MIPI;
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		return QCOM;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		return PLAIN16;
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		break;
+	}
+	return -EINVAL;
+}
+
+int msm_isp_get_bit_per_pixel(uint32_t output_format)
+{
+	switch (output_format) {
+	case V4L2_PIX_FMT_Y4:
+		return 4;
+	case V4L2_PIX_FMT_Y6:
+		return 6;
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_YVU410:
+	case V4L2_PIX_FMT_YVU420:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YYUV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_YUV411P:
+	case V4L2_PIX_FMT_Y41P:
+	case V4L2_PIX_FMT_YUV444:
+	case V4L2_PIX_FMT_YUV555:
+	case V4L2_PIX_FMT_YUV565:
+	case V4L2_PIX_FMT_YUV32:
+	case V4L2_PIX_FMT_YUV410:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_PAL8:
+	case V4L2_PIX_FMT_UV8:
+	case MSM_V4L2_PIX_FMT_META:
+		return 8;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_Y10:
+	case V4L2_PIX_FMT_Y10BPACK:
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		return 10;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_Y12:
+		return 12;
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		return 14;
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_Y16:
+		return 16;
+		/* TODO: Add more image formats */
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		pr_err("%s: Invalid output format %x\n",
+			__func__, output_format);
+		return -EINVAL;
+	}
+}
+
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
+{
+	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+
+	error_info->info_dump_frame_count++;
+}
+
+void msm_isp_process_error_info(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint8_t num_stats_type =
+		vfe_dev->hw_info->stats_hw_info->num_stats_type;
+	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+	static DEFINE_RATELIMIT_STATE(rs,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+	static DEFINE_RATELIMIT_STATE(rs_stats,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+	if (error_info->error_count == 1 ||
+		!(error_info->info_dump_frame_count % 100)) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			process_error_status(vfe_dev);
+		error_info->error_mask0 = 0;
+		error_info->error_mask1 = 0;
+		error_info->camif_status = 0;
+		error_info->violation_status = 0;
+		for (i = 0; i < MAX_NUM_STREAM; i++) {
+			if (error_info->stream_framedrop_count[i] != 0 &&
+				__ratelimit(&rs)) {
+				pr_err("%s: Stream[%d]: dropped %d frames\n",
+					__func__, i,
+					error_info->stream_framedrop_count[i]);
+				error_info->stream_framedrop_count[i] = 0;
+			}
+		}
+		for (i = 0; i < num_stats_type; i++) {
+			if (error_info->stats_framedrop_count[i] != 0 &&
+				__ratelimit(&rs_stats)) {
+				pr_err("%s: Stats stream[%d]: dropped %d frames\n",
+					__func__, i,
+					error_info->stats_framedrop_count[i]);
+				error_info->stats_framedrop_count[i] = 0;
+			}
+		}
+	}
+}
+
+static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
+	uint32_t error_mask0, uint32_t error_mask1)
+{
+	vfe_dev->error_info.error_mask0 |= error_mask0;
+	vfe_dev->error_info.error_mask1 |= error_mask1;
+	vfe_dev->error_info.error_count++;
+}
+
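+/*
+ * msm_isp_process_overflow_irq - detect a bus overflow and start recovery:
+ * save the current IRQ masks, halt the AXI bridge, stop CAMIF immediately,
+ * suppress the remaining status bits and notify user space with an
+ * ISP_EVENT_WM_BUS_OVERFLOW event. While recovery is in progress only the
+ * halt/restart IRQ bits are allowed through.
+ */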
+static void msm_isp_process_overflow_irq(
+	struct vfe_device *vfe_dev,
+	uint32_t *irq_status0, uint32_t *irq_status1)
+{
+	uint32_t overflow_mask;
+
+	/* if there are no active streams - do not start recovery */
+	if (!vfe_dev->axi_data.num_active_stream)
+		return;
+
+	/*Mask out all other irqs if recovery is started*/
+	if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
+		uint32_t halt_restart_mask0, halt_restart_mask1;
+
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		get_halt_restart_mask(&halt_restart_mask0,
+			&halt_restart_mask1);
+		*irq_status0 &= halt_restart_mask0;
+		*irq_status1 &= halt_restart_mask1;
+
+		return;
+	}
+
+	/*Check if any overflow bit is set*/
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_overflow_mask(&overflow_mask);
+	overflow_mask &= *irq_status1;
+
+	if (overflow_mask) {
+		struct msm_isp32_event_data error_event;
+
+		if (vfe_dev->reset_pending == 1) {
+			pr_err("%s:%d failed: overflow %x during reset\n",
+				__func__, __LINE__, overflow_mask);
+			/* Clear overflow bits since reset is pending */
+			*irq_status1 &= ~overflow_mask;
+			return;
+		}
+
+		ISP_DBG("%s: Bus overflow detected: 0x%x, start recovery!\n",
+				__func__, overflow_mask);
+		atomic_set(&vfe_dev->error_info.overflow_state,
+				OVERFLOW_DETECTED);
+		/*Store current IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+			&vfe_dev->error_info.overflow_recover_irq_mask0,
+			&vfe_dev->error_info.overflow_recover_irq_mask1);
+
+		/*Halt the hardware & Clear all other IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
+
+		/*Stop CAMIF Immediately*/
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+
+		/* Drop the remaining IRQ status bits */
+		*irq_status0 = 0;
+		*irq_status1 = 0;
+
+		memset(&error_event, 0, sizeof(error_event));
+		error_event.frame_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		error_event.u.error_info.error_mask = 1 << ISP_WM_BUS_OVERFLOW;
+		msm_isp_send_event(vfe_dev,
+			ISP_EVENT_WM_BUS_OVERFLOW, &error_event);
+	}
+}
+
+void msm_isp_reset_burst_count_and_frame_drop(
+	struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
+{
+	uint32_t framedrop_period = 0;
+
+	if (stream_info->state != ACTIVE ||
+		stream_info->stream_type != BURST_STREAM) {
+		return;
+	}
+	if (stream_info->num_burst_capture != 0) {
+		framedrop_period = msm_isp_get_framedrop_period(
+		   stream_info->frame_skip_pattern);
+		stream_info->burst_frame_count =
+			stream_info->init_frame_drop +
+			(stream_info->num_burst_capture - 1) *
+			framedrop_period + 1;
+		msm_isp_reset_framedrop(vfe_dev, stream_info);
+	}
+}
+
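+/*
+ * msm_isp_process_irq - hard-IRQ top half. Reads the two VFE status
+ * registers, handles overflow, folds error bits into error_info and queues
+ * the remaining status on the tasklet queue for msm_isp_do_tasklet().
+ */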
+irqreturn_t msm_isp_process_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+	struct vfe_device *vfe_dev = (struct vfe_device *) data;
+	uint32_t irq_status0, irq_status1;
+	uint32_t error_mask0, error_mask1;
+
+	vfe_dev->hw_info->vfe_ops.irq_ops.
+		read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+
+	if ((irq_status0 == 0) && (irq_status1 == 0)) {
+		pr_err_ratelimited("%s:VFE%d irq_status0 & 1 are both 0\n",
+			__func__, vfe_dev->pdev->id);
+		return IRQ_HANDLED;
+	}
+
+	msm_isp_process_overflow_irq(vfe_dev,
+		&irq_status0, &irq_status1);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_error_mask(&error_mask0, &error_mask1);
+	error_mask0 &= irq_status0;
+	error_mask1 &= irq_status1;
+	irq_status0 &= ~error_mask0;
+	irq_status1 &= ~error_mask1;
+	if (!vfe_dev->ignore_error &&
+		((error_mask0 != 0) || (error_mask1 != 0)))
+		msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
+
+	if ((irq_status0 == 0) && (irq_status1 == 0) &&
+		(!(((error_mask0 != 0) || (error_mask1 != 0)) &&
+		 vfe_dev->error_info.error_count == 1))) {
+		ISP_DBG("%s: no IRQ status left to process\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+	queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
+	if (queue_cmd->cmd_used) {
+		pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
+			__func__, vfe_dev->pdev->id);
+		list_del(&queue_cmd->list);
+	} else {
+		atomic_add(1, &vfe_dev->irq_cnt);
+	}
+	queue_cmd->vfeInterruptStatus0 = irq_status0;
+	queue_cmd->vfeInterruptStatus1 = irq_status1;
+	msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
+	queue_cmd->cmd_used = 1;
+	vfe_dev->taskletq_idx =
+		(vfe_dev->taskletq_idx + 1) % MSM_VFE_TASKLETQ_SIZE;
+	list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
+	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+	tasklet_schedule(&vfe_dev->vfe_tasklet);
+	return IRQ_HANDLED;
+}
+
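+/*
+ * msm_isp_do_tasklet - bottom half. Drains the tasklet queue and dispatches
+ * the reset, halt, CAMIF, AXI, stats, reg-update and epoch handlers for each
+ * queued status pair; IRQs queued during overflow recovery are skipped.
+ */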
+void msm_isp_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct vfe_device *vfe_dev = (struct vfe_device *) data;
+	struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
+	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+	struct msm_isp_timestamp ts;
+	uint32_t irq_status0, irq_status1;
+
+	while (atomic_read(&vfe_dev->irq_cnt)) {
+		spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+			struct msm_vfe_tasklet_queue_cmd, list);
+
+		if (!queue_cmd) {
+			atomic_set(&vfe_dev->irq_cnt, 0);
+			spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+			return;
+		}
+		atomic_sub(1, &vfe_dev->irq_cnt);
+		list_del(&queue_cmd->list);
+		queue_cmd->cmd_used = 0;
+		irq_status0 = queue_cmd->vfeInterruptStatus0;
+		irq_status1 = queue_cmd->vfeInterruptStatus1;
+		ts = queue_cmd->ts;
+		spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+		ISP_DBG("%s: status0: 0x%x status1: 0x%x\n",
+			__func__, irq_status0, irq_status1);
+		irq_ops->process_reset_irq(vfe_dev,
+			irq_status0, irq_status1);
+		irq_ops->process_halt_irq(vfe_dev,
+			irq_status0, irq_status1);
+		if (atomic_read(&vfe_dev->error_info.overflow_state)
+			!= NO_OVERFLOW) {
+			pr_err("%s: Recovery in processing, Ignore IRQs!!!\n",
+				__func__);
+			continue;
+		}
+		msm_isp_process_error_info(vfe_dev);
+		irq_ops->process_camif_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_axi_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_stats_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_reg_update(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_epoch_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+	}
+}
+
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
+{
+	struct msm_vfe_axi_src_state *src_state = arg;
+
+	if (src_state->input_src >= VFE_SRC_MAX)
+		return -EINVAL;
+	vfe_dev->axi_data.src_info[src_state->input_src].active =
+	src_state->src_active;
+	vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
+	src_state->src_frame_id;
+	return 0;
+}
+
+static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct vfe_device *vfe_dev = NULL;
+
+	if (token) {
+		vfe_dev = (struct vfe_device *)token;
+		if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops) {
+			pr_err("%s:%d] buf_mgr %pK\n", __func__,
+				__LINE__, vfe_dev->buf_mgr);
+			goto end;
+		}
+		if (!vfe_dev->buf_mgr->pagefault_debug_disable) {
+			pr_err("%s:%d] vfe_dev %pK id %d\n", __func__,
+				__LINE__, vfe_dev, vfe_dev->pdev->id);
+			vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
+								iova);
+		}
+	} else {
+		ISP_DBG("%s:%d] no token received: %pK\n",
+			__func__, __LINE__, token);
+		goto end;
+	}
+end:
+	return;
+}
+
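+/*
+ * msm_isp_open_node - first open powers up and resets the VFE, initializes
+ * the buffer manager and shared state, and registers the IOMMU page-fault
+ * handler; subsequent opens only increment the reference count.
+ */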
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	long rc = 0;
+
+	ISP_DBG("%s\n", __func__);
+
+	mutex_lock(&vfe_dev->realtime_mutex);
+	mutex_lock(&vfe_dev->core_mutex);
+
+	if (vfe_dev->vfe_open_cnt++ && vfe_dev->vfe_base) {
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return 0;
+	}
+
+	if (vfe_dev->vfe_base) {
+		pr_err("%s:%d invalid params cnt %d base %pK\n", __func__,
+			__LINE__, vfe_dev->vfe_open_cnt, vfe_dev->vfe_base);
+		vfe_dev->vfe_base = NULL;
+	}
+
+	vfe_dev->reset_pending = 0;
+	vfe_dev->isp_sof_debug = 0;
+
+	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
+		pr_err("%s: init hardware failed\n", __func__);
+		vfe_dev->vfe_open_cnt--;
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EBUSY;
+	}
+
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
+
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+	if (rc <= 0) {
+		pr_err("%s: reset timeout\n", __func__);
+		vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+		vfe_dev->vfe_open_cnt--;
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EINVAL;
+	}
+	vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
+	ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp");
+
+	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
+	memset(&vfe_dev->stats_data, 0,
+		sizeof(struct msm_vfe_stats_shared_data));
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	memset(&vfe_dev->fetch_engine_info, 0,
+		sizeof(vfe_dev->fetch_engine_info));
+	vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
+	vfe_dev->taskletq_idx = 0;
+	vfe_dev->vt_enable = 0;
+	vfe_dev->bus_util_factor = 0;
+	rc = of_property_read_u32(vfe_dev->pdev->dev.of_node,
+			"bus-util-factor", &vfe_dev->bus_util_factor);
+	if (rc < 0)
+		ISP_DBG("%s: Use default bus utilization factor\n", __func__);
+
+	cam_smmu_reg_client_page_fault_handler(
+			vfe_dev->buf_mgr->iommu_hdl,
+			msm_vfe_iommu_fault_handler,
+			NULL,
+			vfe_dev);
+
+	mutex_unlock(&vfe_dev->core_mutex);
+	mutex_unlock(&vfe_dev->realtime_mutex);
+	return 0;
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+static void msm_isp_end_avtimer(void)
+{
+	avcs_core_disable_power_collapse(0);
+}
+#else
+static void msm_isp_end_avtimer(void)
+{
+	pr_err("AV Timer is not supported\n");
+}
+#endif
+
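+/*
+ * msm_isp_close_node - last close unregisters the page-fault handler, halts
+ * the AXI bridge, tears down the buffer manager and releases the hardware;
+ * earlier closes only drop the reference count.
+ */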
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	long rc = 0;
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+	ISP_DBG("%s E\n", __func__);
+	mutex_lock(&vfe_dev->realtime_mutex);
+	mutex_lock(&vfe_dev->core_mutex);
+
+	if (!vfe_dev->vfe_open_cnt) {
+		pr_err("%s invalid state open cnt %d\n", __func__,
+			vfe_dev->vfe_open_cnt);
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EINVAL;
+	}
+
+	if (--vfe_dev->vfe_open_cnt) {
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return 0;
+	}
+
+	/* Unregister page fault handler */
+	cam_smmu_reg_client_page_fault_handler(
+		vfe_dev->buf_mgr->iommu_hdl,
+		NULL, NULL, vfe_dev);
+
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+	if (rc < 0)
+		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
+
+	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
+	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+	if (vfe_dev->vt_enable) {
+		msm_isp_end_avtimer();
+		vfe_dev->vt_enable = 0;
+	}
+	mutex_unlock(&vfe_dev->core_mutex);
+	mutex_unlock(&vfe_dev->realtime_mutex);
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h
new file mode 100644
index 0000000..f2268c3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_UTIL_H__
+#define __MSM_ISP_UTIL_H__
+
+#include "msm_isp_32.h"
+#include <soc/qcom/camera2.h>
+
+/* #define CONFIG_MSM_ISP_DBG 1 */
+
+#ifdef CONFIG_MSM_ISP_DBG
+#define ISP_DBG(fmt, args...) printk(fmt, ##args)
+#else
+#define ISP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define ALT_VECTOR_IDX(x) {x = 3 - x; }
+
+struct msm_isp_bandwidth_mgr {
+	uint32_t bus_client;
+	uint32_t bus_vector_active_idx;
+	uint32_t use_count;
+	struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+void msm_isp_reset_burst_count_and_frame_drop(
+	struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client);
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+	uint64_t ab, uint64_t ib);
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+				      struct msm_isp_statistics *stats);
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev);
+void msm_isp_util_update_clk_rate(long clock_rate);
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+				uint64_t ib,
+				struct msm_isp_bandwidth_info *client_info,
+				unsigned long long ts);
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client);
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub);
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub);
+
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+	uint32_t type, struct msm_isp32_event_data *event_data);
+int msm_isp_cal_word_per_line(uint32_t output_format,
+	uint32_t pixel_per_line);
+int msm_isp_get_bit_per_pixel(uint32_t output_format);
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format);
+irqreturn_t msm_isp_process_irq(int irq_num, void *data);
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_do_tasklet(unsigned long data);
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev);
+void msm_isp_process_error_info(struct vfe_device *vfe_dev);
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+long msm_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+	struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info);
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+	struct msm_vfe_fetch_engine_info *fetch_engine_info);
+void msm_camera_io_dump_2(void __iomem *addr, int size);
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+	struct vfe_device *vfe_dev);
+void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp);
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg);
+#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/ispif/Makefile b/drivers/media/platform/msm/camera_v2/ispif/Makefile
index 236ec73..d56332d 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/Makefile
+++ b/drivers/media/platform/msm/camera_v2/ispif/Makefile
@@ -1,4 +1,8 @@
 ccflags-y += -Idrivers/media/platform/msm/camera_v2
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
 ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ifeq ($(CONFIG_MSM_ISP_V1),y)
+obj-$(CONFIG_MSM_CSID) += msm_ispif_32.o
+else
 obj-$(CONFIG_MSM_CSID) += msm_ispif.o
+endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c
new file mode 100644
index 0000000..e9b2a1d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c
@@ -0,0 +1,1581 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/compat.h>
+#include <media/msmb_isp.h>
+
+#include "msm_ispif_32.h"
+#include "msm.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+
+#ifdef CONFIG_MSM_ISPIF_V1
+#include "msm_ispif_hwreg_v1.h"
+#else
+#include "msm_ispif_hwreg_v2.h"
+#endif
+
+#define V4L2_IDENT_ISPIF                      50001
+#define MSM_ISPIF_DRV_NAME                    "msm_ispif"
+
+#define ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY 0x00
+#define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY  0x01
+#define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY    0x02
+
+#define ISPIF_TIMEOUT_SLEEP_US                1000
+#define ISPIF_TIMEOUT_ALL_US               1000000
+
+#undef CDBG
+#ifdef CONFIG_MSMB_CAMERA_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...) do { } while (0)
+#endif
+
+static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
+{
+	if (!ispif->enb_dump_reg)
+		return;
+
+	if (!ispif->base) {
+		pr_err("%s: null pointer for the ispif base\n", __func__);
+		return;
+	}
+
+	msm_camera_io_dump(ispif->base, 0x250, 1);
+}
+
+
+static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
+	uint8_t intf_type)
+{
+	return (((csid_version <= CSID_VERSION_V22
+					&& intf_type != VFE0) ||
+					(intf_type >= VFE_MAX))
+				? false : true);
+}
+
+static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
+	{"ispif_ahb_clk", NO_SET_RATE},
+	{"csi0_src_clk", NO_SET_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_src_clk", NO_SET_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+};
+
+static struct msm_cam_clk_info ispif_8974_ahb_clk_info[ISPIF_CLK_INFO_MAX];
+
+static struct msm_cam_clk_info ispif_8974_reset_clk_info[] = {
+	{"csi0_src_clk", INIT_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_src_clk", INIT_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"csi2_src_clk", INIT_RATE},
+	{"csi2_clk", NO_SET_RATE},
+	{"csi2_pix_clk", NO_SET_RATE},
+	{"csi2_rdi_clk", NO_SET_RATE},
+	{"csi3_src_clk", INIT_RATE},
+	{"csi3_clk", NO_SET_RATE},
+	{"csi3_pix_clk", NO_SET_RATE},
+	{"csi3_rdi_clk", NO_SET_RATE},
+	{"vfe0_clk_src", INIT_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+	{"vfe1_clk_src", INIT_RATE},
+	{"camss_vfe_vfe1_clk", NO_SET_RATE},
+	{"camss_csi_vfe1_clk", NO_SET_RATE},
+};
+
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+	int rc = 0;
+	long timeout = 0;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
+	ispif->clk_idx = 0;
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set when device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set when device is 8974 */
+		ispif->clk_idx = 1;
+	}
+
+	init_completion(&ispif->reset_complete[VFE0]);
+	if (ispif->hw_num_isps > 1)
+		init_completion(&ispif->reset_complete[VFE1]);
+
+	/* initiate reset of ISPIF */
+	msm_camera_io_w(ISPIF_RST_CMD_MASK,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	if (ispif->hw_num_isps > 1)
+		msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+					ispif->base + ISPIF_RST_CMD_1_ADDR);
+
+	timeout = wait_for_completion_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+	CDBG("%s: VFE0 done\n", __func__);
+
+	if (timeout <= 0) {
+		pr_err("%s: VFE0 reset wait timeout\n", __func__);
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			rc = msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8626_reset_clk_info, reset_clk1,
+				ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+			if (rc < 0)
+				pr_err("%s: cannot disable clock, error = %d",
+					__func__, rc);
+		}
+		return -ETIMEDOUT;
+	}
+
+	if (ispif->hw_num_isps > 1) {
+		timeout = wait_for_completion_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		CDBG("%s: VFE1 done\n", __func__);
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8974_reset_clk_info, reset_clk,
+				ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
+	return rc;
+}
+
+static int msm_ispif_get_ahb_clk_info(struct ispif_device *ispif_dev,
+	struct platform_device *pdev,
+	struct msm_cam_clk_info *ahb_clk_info)
+{
+	uint32_t num_ahb_clk = 0;
+	int i, count, rc;
+	uint32_t rates[ISPIF_CLK_INFO_MAX];
+
+	struct device_node *of_node;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CDBG("count = %d\n", count);
+	if (count <= 0) {
+		pr_err("no clocks found in device tree, count=%d", count);
+		return 0;
+	}
+
+	if (count > ISPIF_CLK_INFO_MAX) {
+		pr_err("invalid count=%d, max is %d\n", count,
+			ISPIF_CLK_INFO_MAX);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(ahb_clk_info[num_ahb_clk].clk_name));
+		CDBG("clock-names[%d] = %s\n",
+			 i, ahb_clk_info[num_ahb_clk].clk_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			return rc;
+		}
+		if (strnstr(ahb_clk_info[num_ahb_clk].clk_name, "ahb",
+			sizeof(ahb_clk_info[num_ahb_clk].clk_name))) {
+			ahb_clk_info[num_ahb_clk].clk_rate =
+				(rates[i] == 0) ? (long)-1 : rates[i];
+			CDBG("clk_rate[%d] = %ld\n", i,
+				ahb_clk_info[num_ahb_clk].clk_rate);
+			num_ahb_clk++;
+		}
+	}
+	ispif_dev->num_ahb_clk = num_ahb_clk;
+	return 0;
+}
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
+{
+	int rc = 0;
+
+	if (ispif->csid_version < CSID_VERSION_V30) {
+		/* Older ISPIF versions don't need the AHB clock */
+		return 0;
+	}
+
+	rc = msm_ispif_get_ahb_clk_info(ispif, ispif->pdev,
+		ispif_8974_ahb_clk_info);
+	if (rc < 0) {
+		pr_err("%s: msm_ispif_get_ahb_clk_info() failed", __func__);
+		return -EFAULT;
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_ahb_clk_info, ispif->ahb_clk,
+		ispif->num_ahb_clk, enable);
+	if (rc < 0) {
+		pr_err("%s: cannot enable clock, error = %d",
+			__func__, rc);
+	}
+
+	return rc;
+}
+
+static int msm_ispif_reset(struct ispif_device *ispif)
+{
+	int rc = 0;
+	int i;
+
+	if (WARN_ON(!ispif))
+		return -EINVAL;
+
+	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+	for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+
+		msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT,
+			ispif->base + ISPIF_VFE_m_CTRL_0(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_0(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_1(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_2(i));
+
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
+
+		msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+			ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+		msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+			ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+		pr_debug("%s: base %pK", __func__, ispif->base);
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
+
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1));
+	}
+
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	return rc;
+}
+
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data;
+
+	if (WARN_ON(!ispif))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+	switch (intftype) {
+	case PIX0:
+		data &= ~(BIT(1) | BIT(0));
+		data |= (uint32_t)csid;
+		break;
+	case RDI0:
+		data &= ~(BIT(5) | BIT(4));
+		data |= (uint32_t)(csid << 4);
+		break;
+	case PIX1:
+		data &= ~(BIT(9) | BIT(8));
+		data |= (uint32_t)(csid << 8);
+		break;
+	case RDI1:
+		data &= ~(BIT(13) | BIT(12));
+		data |= (uint32_t)(csid << 12);
+		break;
+	case RDI2:
+		data &= ~(BIT(21) | BIT(20));
+		data |= (uint32_t)(csid << 20);
+		break;
+	}
+
+	msm_camera_io_w_mb(data, ispif->base +
+		ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+}
+
+static void msm_ispif_enable_crop(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf, uint16_t start_pixel,
+	uint16_t end_pixel)
+{
+	uint32_t data;
+
+	if (WARN_ON(!ispif))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+	data |= (1 << (intftype + 7));
+	if (intftype == PIX0)
+		data |= 1 << PIX0_LINE_BUF_EN_BIT;
+	msm_camera_io_w(data,
+		ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+
+	if (intftype == PIX0)
+		msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+			ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 0));
+	else if (intftype == PIX1)
+		msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+			ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 1));
+	else {
+		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+		WARN_ON(1);
+		return;
+	}
+}
+
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+	uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf, uint8_t enable)
+{
+	uint32_t intf_addr, data;
+
+	if (WARN_ON((!ispif)))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	switch (intftype) {
+	case PIX0:
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
+		break;
+	case RDI0:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
+		break;
+	case PIX1:
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
+		break;
+	case RDI1:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
+		break;
+	case RDI2:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
+		break;
+	default:
+		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+		WARN_ON(1);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + intf_addr);
+	if (enable)
+		data |= (uint32_t)cid_mask;
+	else
+		data &= ~((uint32_t)cid_mask);
+	msm_camera_io_w_mb(data, ispif->base + intf_addr);
+}
+
+static int msm_ispif_validate_intf_status(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf)
+{
+	int rc = 0;
+	uint32_t data = 0;
+
+	if (WARN_ON((!ispif)))
+		return -ENODEV;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
+		break;
+	case RDI0:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
+		break;
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
+		break;
+	case RDI1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
+		break;
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
+		break;
+	}
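+	/* the low four status bits read 0xF only when the interface is idle */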
+	if ((data & 0xf) != 0xf)
+		rc = -EBUSY;
+	return rc;
+}
+
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data = 0;
+
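+	/*
+	 * The clock mux holds a 4-bit CSID select per interface: PIX0/PIX1
+	 * share an 8-bit field per VFE in the base register, RDI0/1/2 use
+	 * a 12-bit field per VFE at ISPIF_RDI_CLK_MUX_SEL_ADDR.
+	 */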
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf << (vfe_intf * 8));
+		data |= (csid << (vfe_intf * 8));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI0:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (vfe_intf * 12));
+		data |= (csid << (vfe_intf * 12));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf0 << (vfe_intf * 8));
+		data |= (csid << (4 + (vfe_intf * 8)));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI1:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (4 + (vfe_intf * 12)));
+		data |= (csid << (4 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (8 + (vfe_intf * 12)));
+		data |= (csid << (8 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+	}
+	CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+	/* ensure clk mux is enabled */
+	mb();
+}
+
+static uint16_t msm_ispif_get_cids_mask_from_cfg(
+	struct msm_ispif_params_entry *entry)
+{
+	int i;
+	uint16_t cids_mask = 0;
+
+	if (WARN_ON(!entry)) {
+		pr_err("%s: invalid entry", __func__);
+		return cids_mask;
+	}
+
+	for (i = 0; i < entry->num_cids && i < MAX_CID_CH_PARAM_ENTRY; i++)
+		cids_mask |= (1 << entry->cids[i]);
+
+	return cids_mask;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0, i = 0;
+	uint16_t cid_mask;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_vfe_intf vfe_intf;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (vfe_intf >= VFE_MAX) {
+			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+				__LINE__, i, vfe_intf);
+			return -EINVAL;
+		}
+		if (!msm_ispif_is_intf_valid(ispif->csid_version,
+				vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			return -EINVAL;
+		}
+		msm_camera_io_w(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+		msm_camera_io_w(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+		msm_camera_io_w_mb(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+
+		vfe_intf = params->entries[i].vfe_intf;
+
+		CDBG("%s intftype %x, vfe_intf %d, csid %d\n", __func__,
+			intftype, vfe_intf, params->entries[i].csid);
+
+		if ((intftype >= INTF_MAX) ||
+			(vfe_intf >= ispif->vfe_info.num_vfe) ||
+			(ispif->csid_version <= CSID_VERSION_V22 &&
+			(vfe_intf > VFE0))) {
+			pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+				__func__, vfe_intf, ispif->csid_version);
+			return -EINVAL;
+		}
+
+		if (ispif->csid_version >= CSID_VERSION_V30)
+			msm_ispif_select_clk_mux(ispif, intftype,
+				params->entries[i].csid, vfe_intf);
+
+		rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+		if (rc) {
+			pr_err("%s:validate_intf_status failed, rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		msm_ispif_sel_csid_core(ispif, intftype,
+			params->entries[i].csid, vfe_intf);
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+				&params->entries[i]);
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			cid_mask, vfe_intf, 1);
+		if (params->entries[i].crop_enable)
+			msm_ispif_enable_crop(ispif, intftype, vfe_intf,
+				params->entries[i].crop_start_pixel,
+				params->entries[i].crop_end_pixel);
+	}
+
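+	/* unmask and clear the ISPIF IRQs on both VFE interfaces */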
+	for (vfe_intf = 0; vfe_intf < 2; vfe_intf++) {
+		msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
+	}
+
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	return rc;
+}
+
+static int msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
+	struct msm_ispif_param_data *params)
+{
+	uint8_t vc;
+	int i, k;
+	enum msm_ispif_intftype intf_type;
+	enum msm_ispif_cid cid;
+	enum msm_ispif_vfe_intf vfe_intf;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			return -EINVAL;
+		}
+		if (params->entries[i].num_cids > MAX_CID_CH_PARAM_ENTRY) {
+			pr_err("%s: out of range of cid_num %d\n",
+				__func__, params->entries[i].num_cids);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intf_type = params->entries[i].intftype;
+		vfe_intf = params->entries[i].vfe_intf;
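+		/*
+		 * Each CID maps to a virtual channel (4 CIDs per VC) and
+		 * each VC has a 2-bit command field: PIX/RDI0/RDI1 commands
+		 * live in INTF_CMD_0 (one byte per interface), RDI2 commands
+		 * in INTF_CMD_1 starting at bit 8.
+		 */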
+		for (k = 0; k < params->entries[i].num_cids; k++) {
+			cid = params->entries[i].cids[k];
+			vc = cid / 4;
+			if (intf_type == RDI2) {
+				/* zero out two bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1 &=
+					~(0x3 << (vc * 2 + 8));
+				/* set cmd bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1 |=
+					(cmd_bits << (vc * 2 + 8));
+			} else {
+				/* zero 2 bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd &=
+					~(0x3 << (vc * 2 + intf_type * 8));
+				/* set cmd bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd |=
+					(cmd_bits << (vc * 2 + intf_type * 8));
+			}
+		}
+		/* cmd for PIX0, PIX1, RDI0, RDI1 */
+		if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF)
+			msm_camera_io_w_mb(
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd,
+				ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
+
+		/* cmd for RDI2 */
+		if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
+			msm_camera_io_w_mb(
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
+				ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
+	}
+	return 0;
+}
+
+static int msm_ispif_stop_immediately(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int i, rc = 0;
+	uint16_t cid_mask = 0;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params);
+
+	/* after stopping the interface, clear its CID enable bits */
+	for (i = 0; i < params->num; i++) {
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+			&params->entries[i]);
+		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+			cid_mask, params->entries[i].vfe_intf, 0);
+	}
+
+	return rc;
+}
+
+static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+	return rc;
+}
+
+static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0, i;
+	long timeout = 0;
+	uint16_t cid_mask;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t vfe_mask = 0;
+	uint32_t intf_addr;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
+	ispif->clk_idx = 0;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (vfe_intf >= VFE_MAX) {
+			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+				__LINE__, i, vfe_intf);
+			return -EINVAL;
+		}
+		vfe_mask |= (1 << vfe_intf);
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set when device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set when device is 8974 */
+		ispif->clk_idx = 1;
+	}
+
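+	/* reset the ISPIF for each VFE in use and wait for the done IRQ */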
+	if (vfe_mask & (1 << VFE0)) {
+		init_completion(&ispif->reset_complete[VFE0]);
+		pr_err("%s Init completion VFE0\n", __func__);
+		/* initiate reset of ISPIF */
+		msm_camera_io_w(0x00001FF9,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	}
+	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+		init_completion(&ispif->reset_complete[VFE1]);
+		pr_err("%s Init completion VFE1\n", __func__);
+		msm_camera_io_w(0x00001FF9,
+			ispif->base + ISPIF_RST_CMD_1_ADDR);
+	}
+
+	if (vfe_mask & (1 << VFE0)) {
+		timeout = wait_for_completion_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+		if (timeout <= 0) {
+			pr_err("%s: VFE0 reset wait timeout\n", __func__);
+			rc = -ETIMEDOUT;
+			goto disable_clk;
+		}
+	}
+
+	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+		timeout = wait_for_completion_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			rc = -ETIMEDOUT;
+			goto disable_clk;
+		}
+	}
+
+	pr_info("%s: ISPIF reset hw done\n", __func__);
+
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+			goto end;
+		}
+	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+			goto end;
+		}
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+		vfe_intf = params->entries[i].vfe_intf;
+
+		switch (intftype) {
+		case PIX0:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case RDI0:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case PIX1:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI1:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI2:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+			break;
+		default:
+			pr_err("%s: invalid intftype=%d\n", __func__,
+			params->entries[i].intftype);
+			rc = -EPERM;
+			goto end;
+		}
+
+		msm_ispif_intf_cmd(ispif,
+			ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+
+		vfe_intf = params->entries[i].vfe_intf;
+
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+			&params->entries[i]);
+
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			cid_mask, vfe_intf, 1);
+	}
+
+end:
+	return rc;
+disable_clk:
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0)
+			pr_err("%s: cannot disable clock, error = %d\n",
+				__func__, rc);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int i, rc = 0;
+	uint16_t cid_mask = 0;
+	uint32_t intf_addr;
+	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t stop_flag = 0;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		if (!msm_ispif_is_intf_valid(ispif->csid_version,
+				params->entries[i].vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	msm_ispif_intf_cmd(ispif,
+		ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY, params);
+
+	for (i = 0; i < params->num; i++) {
+		cid_mask =
+			msm_ispif_get_cids_mask_from_cfg(&params->entries[i]);
+		vfe_intf = params->entries[i].vfe_intf;
+
+		switch (params->entries[i].intftype) {
+		case PIX0:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case RDI0:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case PIX1:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI1:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI2:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+			break;
+		default:
+			pr_err("%s: invalid intftype=%d\n", __func__,
+				params->entries[i].intftype);
+			rc = -EPERM;
+			goto end;
+		}
+
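+		/* wait for the stopped interface to report idle (0xF) */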
+		rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+					(stop_flag & 0xF) == 0xF,
+					ISPIF_TIMEOUT_SLEEP_US,
+					ISPIF_TIMEOUT_ALL_US);
+		if (rc < 0)
+			goto end;
+
+		/* disable CIDs in CID_MASK register */
+		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+			cid_mask, vfe_intf, 0);
+	}
+
+end:
+	return rc;
+}
+
+static void ispif_process_irq(struct ispif_device *ispif,
+	struct ispif_irq_status *out, enum msm_ispif_vfe_intf vfe_id)
+{
+	if (WARN_ON(!ispif) || WARN_ON(!out))
+		return;
+
+	if (out[vfe_id].ispifIrqStatus0 &
+			ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+		if (ispif->ispif_sof_debug < 5)
+			pr_err("%s: PIX0 frame id: %u\n", __func__,
+				ispif->sof_count[vfe_id].sof_cnt[PIX0]);
+		ispif->sof_count[vfe_id].sof_cnt[PIX0]++;
+		ispif->ispif_sof_debug++;
+	}
+	if (out[vfe_id].ispifIrqStatus0 &
+			ISPIF_IRQ_STATUS_RDI0_SOF_MASK) {
+		ispif->sof_count[vfe_id].sof_cnt[RDI0]++;
+	}
+	if (out[vfe_id].ispifIrqStatus1 &
+			ISPIF_IRQ_STATUS_RDI1_SOF_MASK) {
+		ispif->sof_count[vfe_id].sof_cnt[RDI1]++;
+	}
+	if (out[vfe_id].ispifIrqStatus2 &
+			ISPIF_IRQ_STATUS_RDI2_SOF_MASK) {
+		ispif->sof_count[vfe_id].sof_cnt[RDI2]++;
+	}
+}
+
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+	void *data)
+{
+	struct ispif_device *ispif = (struct ispif_device *)data;
+
+	if (WARN_ON(!ispif) || WARN_ON(!out))
+		return;
+
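+	/* latch each IRQ status and write it back to its CLEAR register */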
+	out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+		ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
+	msm_camera_io_w(out[VFE0].ispifIrqStatus0,
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
+
+	out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+		ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
+	msm_camera_io_w(out[VFE0].ispifIrqStatus1,
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
+
+	out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+		ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
+	msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
+		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
+
+	if (ispif->vfe_info.num_vfe > 1) {
+		out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
+		msm_camera_io_w(out[VFE1].ispifIrqStatus0,
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
+
+		out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
+		msm_camera_io_w(out[VFE1].ispifIrqStatus1,
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
+
+		out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
+		msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
+			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+	}
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE0]);
+
+		if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 pix0 overflow.\n", __func__);
+
+		if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 rdi0 overflow.\n", __func__);
+
+		if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 rdi1 overflow.\n", __func__);
+
+		if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
+			pr_err("%s: VFE0 rdi2 overflow.\n", __func__);
+
+		ispif_process_irq(ispif, out, VFE0);
+	}
+	if (ispif->hw_num_isps > 1) {
+		if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
+			complete(&ispif->reset_complete[VFE1]);
+
+		if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 pix0 overflow.\n", __func__);
+
+		if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 rdi0 overflow.\n", __func__);
+
+		if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 rdi1 overflow.\n", __func__);
+
+		if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
+			pr_err("%s: VFE1 rdi2 overflow.\n", __func__);
+
+		ispif_process_irq(ispif, out, VFE1);
+	}
+}
+
+static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
+{
+	struct ispif_irq_status irq[VFE_MAX];
+
+	msm_ispif_read_irq_status(irq, data);
+	return IRQ_HANDLED;
+}
+
+static int msm_ispif_set_vfe_info(struct ispif_device *ispif,
+	struct msm_ispif_vfe_info *vfe_info)
+{
+	if (!vfe_info || (vfe_info->num_vfe <= 0) ||
+		((uint32_t)(vfe_info->num_vfe) > ispif->hw_num_isps)) {
+		pr_err("Invalid VFE info: %pK %d\n", vfe_info,
+			(vfe_info ? vfe_info->num_vfe : 0));
+		return -EINVAL;
+	}
+
+	memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
+
+	return 0;
+}
+
+static int msm_ispif_init(struct ispif_device *ispif,
+	uint32_t csid_version)
+{
+	int rc = 0;
+
+	if (WARN_ON(!ispif)) {
+		pr_err("%s: invalid ispif params", __func__);
+		return -EINVAL;
+	}
+
+	if (ispif->ispif_state == ISPIF_POWER_UP) {
+		pr_err("%s: ispif already initialized, state = %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* 0xFFFFFFFF marks "no command applied yet" on each interface */
+	ispif->applied_intf_cmd[VFE0].intf_cmd  = 0xFFFFFFFF;
+	ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF;
+	ispif->applied_intf_cmd[VFE1].intf_cmd  = 0xFFFFFFFF;
+	ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF;
+	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+
+	ispif->csid_version = csid_version;
+
+	if (ispif->csid_version >= CSID_VERSION_V30) {
+		if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
+			pr_err("%s csi clk mux mem %pK io %pK\n", __func__,
+				ispif->clk_mux_mem, ispif->clk_mux_io);
+			rc = -ENOMEM;
+			return rc;
+		}
+		ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
+			resource_size(ispif->clk_mux_mem));
+		if (!ispif->clk_mux_base) {
+			pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
+			rc = -ENOMEM;
+			return rc;
+		}
+	}
+
+	ispif->base = ioremap(ispif->mem->start,
+		resource_size(ispif->mem));
+	if (!ispif->base) {
+		rc = -ENOMEM;
+		pr_err("%s: nomem\n", __func__);
+		goto end;
+	}
+	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
+		IRQF_TRIGGER_RISING, "ispif", ispif);
+	if (rc) {
+		pr_err("%s: request_irq error = %d\n", __func__, rc);
+		goto error_irq;
+	}
+
+	rc = msm_ispif_clk_ahb_enable(ispif, 1);
+	if (rc) {
+		pr_err("%s: ahb_clk enable failed\n", __func__);
+		goto error_ahb;
+	}
+
+	msm_ispif_reset_hw(ispif);
+
+	rc = msm_ispif_reset(ispif);
+	if (rc == 0) {
+		ispif->ispif_state = ISPIF_POWER_UP;
+		CDBG("%s: power up done\n", __func__);
+		goto end;
+	}
+
+error_ahb:
+	free_irq(ispif->irq->start, ispif);
+error_irq:
+	iounmap(ispif->base);
+
+end:
+	return rc;
+}
+
+static void msm_ispif_release(struct ispif_device *ispif)
+{
+	if (WARN_ON(!ispif)) {
+		pr_err("%s: invalid ispif params", __func__);
+		return;
+	}
+
+	if (!ispif->base) {
+		pr_err("%s: ispif base is NULL\n", __func__);
+		return;
+	}
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		return;
+	}
+
+	/* make sure no streaming going on */
+	msm_ispif_reset(ispif);
+
+	msm_ispif_clk_ahb_enable(ispif, 0);
+
+	free_irq(ispif->irq->start, ispif);
+
+	iounmap(ispif->base);
+
+	iounmap(ispif->clk_mux_base);
+
+	ispif->ispif_state = ISPIF_POWER_DOWN;
+}
+
+static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
+{
+	long rc = 0;
+	struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg;
+	struct ispif_device *ispif;
+
+	if (WARN_ON(!sd) || WARN_ON(!pcdata))
+		return -EINVAL;
+
+	ispif = (struct ispif_device *)v4l2_get_subdevdata(sd);
+	mutex_lock(&ispif->mutex);
+	switch (pcdata->cfg_type) {
+	case ISPIF_ENABLE_REG_DUMP:
+		ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
+		break;
+	case ISPIF_INIT:
+		rc = msm_ispif_init(ispif, pcdata->csid_version);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_CFG:
+		rc = msm_ispif_config(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_START_FRAME_BOUNDARY:
+		rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_RESTART_FRAME_BOUNDARY:
+		rc = msm_ispif_restart_frame_boundary(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+
+	case ISPIF_STOP_FRAME_BOUNDARY:
+		rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_STOP_IMMEDIATELY:
+		rc = msm_ispif_stop_immediately(ispif, &pcdata->params);
+		msm_ispif_io_dump_reg(ispif);
+		break;
+	case ISPIF_RELEASE:
+		msm_ispif_release(ispif);
+		break;
+	case ISPIF_SET_VFE_INFO:
+		rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info);
+		break;
+	default:
+		pr_err("%s: invalid cfg_type\n", __func__);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&ispif->mutex);
+	return rc;
+}
+static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops;
+
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct ispif_device *ispif =
+		(struct ispif_device *)v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_MSM_ISPIF_CFG:
+		return msm_ispif_cmd(sd, arg);
+	case MSM_SD_NOTIFY_FREEZE: {
+		ispif->ispif_sof_debug = 0;
+		return 0;
+	}
+	case MSM_SD_SHUTDOWN: {
+		if (ispif && ispif->base) {
+			mutex_lock(&ispif->mutex);
+			msm_ispif_release(ispif);
+			mutex_unlock(&ispif->mutex);
+		}
+		return 0;
+	}
+	default:
+		pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
+			__func__, cmd);
+		return -ENOIOCTLCMD;
+	}
+}
+
+static long msm_ispif_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return msm_ispif_subdev_ioctl(sd, cmd, arg);
+}
+
+static long msm_ispif_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, msm_ispif_subdev_do_ioctl);
+}
+
+static int ispif_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&ispif->mutex);
+	/* mem remap is done in init when the clock is on */
+	ispif->open_cnt++;
+	mutex_unlock(&ispif->mutex);
+	return 0;
+}
+
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+	if (!ispif) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ispif->mutex);
+	if (ispif->open_cnt == 0) {
+		pr_err("%s: Invalid close\n", __func__);
+		rc = -ENODEV;
+		goto end;
+	}
+	ispif->open_cnt--;
+	if (ispif->open_cnt == 0)
+		msm_ispif_release(ispif);
+end:
+	mutex_unlock(&ispif->mutex);
+	return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = {
+	/* .g_chip_ident = &msm_ispif_subdev_g_chip_ident, */
+	.ioctl = &msm_ispif_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_ispif_subdev_ops = {
+	.core = &msm_ispif_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops = {
+	.open = ispif_open_node,
+	.close = ispif_close_node,
+};
+
+static int ispif_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ispif_device *ispif;
+
+	ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
+	if (!ispif)
+		return -ENOMEM;
+
+	if (pdev->dev.of_node) {
+		of_property_read_u32(pdev->dev.of_node,
+			"cell-index", &pdev->id);
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,num-isps", &ispif->hw_num_isps);
+		if (rc)
+			/* backward compatibility */
+			ispif->hw_num_isps = 1;
+		/* not an error condition */
+		rc = 0;
+	}
+
+	mutex_init(&ispif->mutex);
+	ispif->mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "ispif");
+	if (!ispif->mem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto error;
+	}
+	ispif->irq = platform_get_resource_byname(pdev,
+		IORESOURCE_IRQ, "ispif");
+	if (!ispif->irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto error;
+	}
+	ispif->io = request_mem_region(ispif->mem->start,
+		resource_size(ispif->mem), pdev->name);
+	if (!ispif->io) {
+		pr_err("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto error;
+	}
+	ispif->clk_mux_mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "csi_clk_mux");
+	if (ispif->clk_mux_mem) {
+		ispif->clk_mux_io = request_mem_region(
+			ispif->clk_mux_mem->start,
+			resource_size(ispif->clk_mux_mem),
+			ispif->clk_mux_mem->name);
+		if (!ispif->clk_mux_io)
+			pr_err("%s: no valid csi_mux region\n", __func__);
+	}
+
+	v4l2_subdev_init(&ispif->msm_sd.sd, &msm_ispif_subdev_ops);
+	ispif->msm_sd.sd.internal_ops = &msm_ispif_internal_ops;
+	ispif->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	snprintf(ispif->msm_sd.sd.name,
+		ARRAY_SIZE(ispif->msm_sd.sd.name), MSM_ISPIF_DRV_NAME);
+	v4l2_set_subdevdata(&ispif->msm_sd.sd, ispif);
+
+	platform_set_drvdata(pdev, &ispif->msm_sd.sd);
+
+	media_entity_pads_init(&ispif->msm_sd.sd.entity, 0, NULL);
+	ispif->msm_sd.sd.entity.function = MSM_CAMERA_SUBDEV_ISPIF;
+	ispif->msm_sd.sd.entity.name = pdev->name;
+	ispif->msm_sd.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x1;
+	rc = msm_sd_register(&ispif->msm_sd);
+	if (rc) {
+		pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+		goto error;
+	}
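+	/* reuse the core v4l2 subdev fops, overriding only the ioctls */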
+	msm_ispif_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+	msm_ispif_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+	msm_ispif_v4l2_subdev_fops.unlocked_ioctl = msm_ispif_subdev_fops_ioctl;
+	msm_ispif_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+	msm_ispif_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+#ifdef CONFIG_COMPAT
+	msm_ispif_v4l2_subdev_fops.compat_ioctl32 = msm_ispif_subdev_fops_ioctl;
+#endif
+	ispif->msm_sd.sd.devnode->fops = &msm_ispif_v4l2_subdev_fops;
+	ispif->pdev = pdev;
+	ispif->ispif_state = ISPIF_POWER_DOWN;
+	ispif->open_cnt = 0;
+	return 0;
+
+error:
+	mutex_destroy(&ispif->mutex);
+	kfree(ispif);
+	return rc;
+}
+
+static const struct of_device_id msm_ispif_dt_match[] = {
+	{.compatible = "qcom,ispif"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
+static struct platform_driver ispif_driver = {
+	.probe = ispif_probe,
+	.driver = {
+		.name = MSM_ISPIF_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ispif_dt_match,
+	},
+};
+
+static int __init msm_ispif_init_module(void)
+{
+	return platform_driver_register(&ispif_driver);
+}
+
+static void __exit msm_ispif_exit_module(void)
+{
+	platform_driver_unregister(&ispif_driver);
+}
+
+module_init(msm_ispif_init_module);
+module_exit(msm_ispif_exit_module);
+MODULE_DESCRIPTION("MSM ISP Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h
new file mode 100644
index 0000000..6217fba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_32_H
+#define MSM_ISPIF_32_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_ispif.h>
+#include "msm_sd.h"
+
+#define ISPIF_CLK_INFO_MAX 24
+
+struct ispif_irq_status {
+	uint32_t ispifIrqStatus0;
+	uint32_t ispifIrqStatus1;
+	uint32_t ispifIrqStatus2;
+};
+
+enum msm_ispif_state_t {
+	ISPIF_POWER_UP,
+	ISPIF_POWER_DOWN,
+};
+
+struct ispif_sof_count {
+	uint32_t sof_cnt[INTF_MAX];
+};
+
+struct ispif_intf_cmd {
+	uint32_t intf_cmd;
+	uint32_t intf_cmd1;
+};
+
+struct ispif_device {
+	struct platform_device *pdev;
+	struct msm_sd_subdev msm_sd;
+	struct resource *mem;
+	struct resource *clk_mux_mem;
+	struct resource *irq;
+	struct resource *io;
+	struct resource *clk_mux_io;
+	void __iomem *base;
+	void __iomem *clk_mux_base;
+	struct mutex mutex;
+	uint8_t start_ack_pending;
+	uint32_t csid_version;
+	int enb_dump_reg;
+	uint32_t open_cnt;
+	struct ispif_sof_count sof_count[VFE_MAX];
+	struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
+	enum msm_ispif_state_t ispif_state;
+	struct msm_ispif_vfe_info vfe_info;
+	struct clk *ahb_clk[ISPIF_CLK_INFO_MAX];
+	struct completion reset_complete[VFE_MAX];
+	uint32_t hw_num_isps;
+	uint32_t num_ahb_clk;
+	uint32_t clk_idx;
+	uint32_t ispif_sof_debug;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index 625a0db..c045eda 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -2077,14 +2077,8 @@
 
 static int __init msm_actuator_init_module(void)
 {
-	int32_t rc = 0;
-
 	CDBG("Enter\n");
-	rc = platform_driver_register(&msm_actuator_platform_driver);
-	if (!rc)
-		return rc;
-
-	CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+	platform_driver_register(&msm_actuator_platform_driver);
 	return i2c_add_driver(&msm_actuator_i2c_driver);
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_3_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_3_hwreg.h
new file mode 100644
index 0000000..ac0a989
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_3_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_3_HWREG_H
+#define MSM_CSID_3_4_3_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+static uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+static struct csid_reg_parms_t csid_v3_4_3 = {
+	/* MIPI	CSID registers */
+	0x0,
+	0x4,
+	0x8,
+	0xC,
+	0x10,
+	0x14,
+	0x18,
+	0x1C,
+	0x20,
+	0x60,
+	0x64,
+	0x68,
+	0x6C,
+	0x70,
+	0x74,
+	0x78,
+	0x7C,
+	0x80,
+	0x84,
+	0x88,
+	0x8C,
+	0x90,
+	0x94,
+	0x98,
+	0xA0,
+	0xA4,
+	0xAC,
+	0xB0,
+	0xB4,
+	11,
+	0x7FFF,
+	0x4,
+	17,
+	0x30040003,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+	0x7f010800,
+	20,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index ba32526..9d3184e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -26,6 +26,7 @@
 #include "include/msm_csid_3_5_hwreg.h"
 #include "include/msm_csid_3_4_1_hwreg.h"
 #include "include/msm_csid_3_4_2_hwreg.h"
+#include "include/msm_csid_3_4_3_hwreg.h"
 #include "include/msm_csid_3_6_0_hwreg.h"
 #include "include/msm_csid_3_5_1_hwreg.h"
 #include "cam_hw_ops.h"
@@ -42,6 +43,7 @@
 #define CSID_VERSION_V34                      0x30040000
 #define CSID_VERSION_V34_1                    0x30040001
 #define CSID_VERSION_V34_2                    0x30040002
+#define CSID_VERSION_V34_3                    0x30040003
 #define CSID_VERSION_V36                      0x30060000
 #define CSID_VERSION_V37                      0x30070000
 #define CSID_VERSION_V35                      0x30050000
@@ -229,7 +231,9 @@
 static int msm_csid_reset(struct csid_device *csid_dev)
 {
 	int32_t rc = 0;
+	uint32_t irq = 0, irq_bitshift;
 
+	irq_bitshift = csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift;
 	msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all,
 		csid_dev->base +
 		csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
@@ -238,8 +242,23 @@
 	if (rc < 0) {
 		pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n",
 			rc);
+	} else if (rc == 0) {
+		irq = msm_camera_io_r(csid_dev->base +
+			csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+		pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+			__func__, csid_dev->pdev->id, irq);
+		if (irq & (0x1 << irq_bitshift)) {
+			rc = 1;
+			CDBG("%s succeeded", __func__);
+		} else {
+			rc = 0;
+			pr_err("%s reset csid_irq_status failed = 0x%x\n",
+				__func__, irq);
+		}
 		if (rc == 0)
 			rc = -ETIMEDOUT;
+	} else {
+		CDBG("%s succeeded", __func__);
 	}
 	return rc;
 }
@@ -306,8 +325,7 @@
 	if (!msm_csid_find_max_clk_rate(csid_dev))
 		pr_err("msm_csid_find_max_clk_rate failed\n");
 
-	clk_rate = (csid_params->csi_clk > 0) ?
-				(csid_params->csi_clk) : csid_dev->csid_max_clk;
+	clk_rate = csid_dev->csid_max_clk;
 
 	clk_rate = msm_camera_clk_set_rate(&csid_dev->pdev->dev,
 		csid_dev->csid_clk[csid_dev->csid_clk_index], clk_rate);
@@ -1163,6 +1181,12 @@
 		new_csid_dev->ctrl_reg->csid_lane_assign =
 			csid_lane_assign_v3_4_2;
 	} else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+		"qcom,csid-v3.4.3")) {
+		new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_3;
+		new_csid_dev->hw_dts_version = CSID_VERSION_V34_3;
+		new_csid_dev->ctrl_reg->csid_lane_assign =
+			csid_lane_assign_v3_4_3;
+	} else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
 		"qcom,csid-v3.6.0")) {
 		new_csid_dev->ctrl_reg->csid_reg = csid_v3_6_0;
 		new_csid_dev->hw_dts_version = CSID_VERSION_V36;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 6fc8e1e..0cfddb3 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -29,7 +29,6 @@
 #include "include/msm_csiphy_5_0_hwreg.h"
 #include "include/msm_csiphy_5_0_1_hwreg.h"
 #include "include/msm_csiphy_10_0_0_hwreg.h"
-
 #include "cam_hw_ops.h"
 
 #define DBG_CSIPHY 0
@@ -218,6 +217,8 @@
 		}
 	}
 
+	csiphy_dev->snps_programmed_data_rate = csiphy_params->data_rate;
+
 	if (mode == TWO_LANE_PHY_A) {
 		msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_sys_ctrl.data,
@@ -322,8 +323,11 @@
 		mode = AGGREGATE_MODE;
 		num_lanes = 4;
 		if (csiphy_dev->snps_state != NOT_CONFIGURED) {
-			pr_err("%s: invalid request\n", __func__);
-			return -EINVAL;
+			if (csiphy_dev->snps_programmed_data_rate !=
+				csiphy_params->data_rate)
+				pr_err("reconfiguring snps phy\n");
+			else
+				return 0;
 		}
 		csiphy_dev->snps_state = CONFIGURED_AGGREGATE_MODE;
 		clk_mux_reg &= ~0xff;
@@ -332,34 +336,38 @@
 	} else if (lane_mask == LANE_MASK_PHY_A) { /* PHY A */
 		/* 2 lane config */
 		num_lanes = 2;
+		mode = TWO_LANE_PHY_A;
 		if (csiphy_dev->snps_state == NOT_CONFIGURED) {
-			mode = TWO_LANE_PHY_A;
 			csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_A;
 		} else if (csiphy_dev->snps_state ==
 			CONFIGURED_TWO_LANE_PHY_B) {
 			/* 2 lane + 2 lane config */
-			mode = TWO_LANE_PHY_A;
 			csiphy_dev->snps_state = CONFIGURED_COMBO_MODE;
 		} else {
-			pr_err("%s: invalid request\n", __func__);
-			return -EINVAL;
+			if (csiphy_dev->snps_programmed_data_rate !=
+				csiphy_params->data_rate)
+				pr_err("reconfiguring snps phy\n");
+			else
+				return 0;
 		}
 		clk_mux_reg &= ~0xf;
 		clk_mux_reg |= (uint32_t)csiphy_params->csid_core;
 	} else if (lane_mask == LANE_MASK_PHY_B) { /* PHY B */
 		/* 2 lane config */
 		num_lanes = 2;
+		mode = TWO_LANE_PHY_B;
 		if (csiphy_dev->snps_state == NOT_CONFIGURED) {
-			mode = TWO_LANE_PHY_B;
 			csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_B;
 		} else if (csiphy_dev->snps_state ==
 			CONFIGURED_TWO_LANE_PHY_A) {
 			/* 2 lane + 2 lane config */
-			mode = TWO_LANE_PHY_B;
 			csiphy_dev->snps_state = CONFIGURED_COMBO_MODE;
 		} else {
-			pr_err("%s: invalid request\n", __func__);
-			return -EINVAL;
+			if (csiphy_dev->snps_programmed_data_rate !=
+				csiphy_params->data_rate)
+				pr_err("reconfiguring snps phy\n");
+			else
+				return 0;
 		}
 		clk_mux_reg &= ~0xf0;
 		clk_mux_reg |= csiphy_params->csid_core << 4;
@@ -1239,9 +1247,7 @@
 		return rc;
 	}
 
-	clk_rate = (csiphy_params->csiphy_clk > 0)
-			? csiphy_params->csiphy_clk :
-			csiphy_dev->csiphy_max_clk;
+	clk_rate = csiphy_dev->csiphy_max_clk;
 	clk_rate = msm_camera_clk_set_rate(&csiphy_dev->pdev->dev,
 		csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
 		clk_rate);
@@ -1660,6 +1666,7 @@
 		csiphy_dev->hw_version);
 	csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 	return 0;
 
 csiphy_enable_clk_fail:
@@ -1766,6 +1773,7 @@
 		csiphy_dev->hw_version);
 	csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 	return 0;
 
 csiphy_enable_clk_fail:
@@ -1915,6 +1923,7 @@
 
 	csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 
 	if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
 		 CAM_AHB_SUSPEND_VOTE) < 0)
@@ -2047,6 +2056,7 @@
 
 	csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
 	csiphy_dev->snps_state = NOT_CONFIGURED;
+	csiphy_dev->snps_programmed_data_rate = 0;
 
 	if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
 		 CAM_AHB_SUSPEND_VOTE) < 0)
@@ -2079,7 +2089,10 @@
 			rc = -EFAULT;
 			break;
 		}
-		csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+		if (csiphy_dev->csiphy_sof_debug == SOF_DEBUG_ENABLE) {
+			csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+			rc = msm_camera_enable_irq(csiphy_dev->irq, false);
+		}
 		rc = msm_csiphy_lane_config(csiphy_dev, &csiphy_params);
 		break;
 	case CSIPHY_RELEASE:
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 79baf3c..41d2034 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -250,6 +250,7 @@
 	uint8_t is_snps_phy;
 	enum snps_csiphy_state snps_state;
 	uint8_t num_clk_irq_registers;
+	uint64_t snps_programmed_data_rate;
 };
 
 #define VIDIOC_MSM_CSIPHY_RELEASE \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
index df22d84..3590e15 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
@@ -20,7 +20,6 @@
 #include "msm_sensor.h"
 
 #undef CDBG
-#define MSM_CAMERA_TZ_I2C_VERBOSE
 
 #ifdef CONFIG_MSM_SEC_CCI_DEBUG
 	#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index c0c83e5..6f39956 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -82,9 +82,13 @@
 
 int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
 {
+	struct msm_camera_sensor_slave_info *slave_info = NULL;
+
 	if (!s_ctrl->pdev && !s_ctrl->sensor_i2c_client->client)
 		return 0;
 	kfree(s_ctrl->sensordata->slave_info);
+	slave_info = s_ctrl->sensordata->cam_slave_info;
+	kfree(slave_info->sensor_id_info.setting.reg_setting);
 	kfree(s_ctrl->sensordata->cam_slave_info);
 	kfree(s_ctrl->sensordata->actuator_info);
 	kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
@@ -261,6 +265,17 @@
 		return -EINVAL;
 	}
 
+	if (slave_info->setting && slave_info->setting->size > 0) {
+		rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+			i2c_write_table(s_ctrl->sensor_i2c_client,
+			slave_info->setting);
+		if (rc < 0)
+			pr_err("Write array failed prior to probe\n");
+	} else {
+		CDBG("No writes needed for this sensor before probe\n");
+	}
+
 	rc = sensor_i2c_client->i2c_func_tbl->i2c_read(
 		sensor_i2c_client, slave_info->sensor_id_reg_addr,
 		&chipid, MSM_CAMERA_I2C_WORD_DATA);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 0b0f98a..7832181 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -10,7 +10,7 @@
  * GNU General Public License for more details.
  */
 
-#define SENSOR_DRIVER_I2C "i2c_camera"
+#define SENSOR_DRIVER_I2C "camera"
 /* Header file declaration */
 #include "msm_sensor.h"
 #include "msm_sd.h"
@@ -757,6 +757,8 @@
 
 	unsigned long                        mount_pos = 0;
 	uint32_t                             is_yuv;
+	struct msm_camera_i2c_reg_array     *reg_setting = NULL;
+	struct msm_sensor_id_info_t         *id_info = NULL;
 
 	/* Validate input parameters */
 	if (!setting) {
@@ -805,7 +807,54 @@
 		slave_info->camera_id = slave_info32->camera_id;
 
 		slave_info->i2c_freq_mode = slave_info32->i2c_freq_mode;
-		slave_info->sensor_id_info = slave_info32->sensor_id_info;
+		slave_info->sensor_id_info.sensor_id_reg_addr =
+			slave_info32->sensor_id_info.sensor_id_reg_addr;
+		slave_info->sensor_id_info.sensor_id_mask =
+			slave_info32->sensor_id_info.sensor_id_mask;
+		slave_info->sensor_id_info.sensor_id =
+			slave_info32->sensor_id_info.sensor_id;
+
+		slave_info->sensor_id_info.setting.addr_type =
+			slave_info32->sensor_id_info.setting.addr_type;
+		slave_info->sensor_id_info.setting.data_type =
+			slave_info32->sensor_id_info.setting.data_type;
+		slave_info->sensor_id_info.setting.delay =
+			slave_info32->sensor_id_info.setting.delay;
+		slave_info->sensor_id_info.setting.size =
+			slave_info32->sensor_id_info.setting.size;
+
+		if (!slave_info->sensor_id_info.setting.size ||
+			(slave_info->sensor_id_info.setting.size >
+				I2C_REG_DATA_MAX)) {
+			CDBG("%s:No writes needed to probe\n", __func__);
+			slave_info->sensor_id_info.setting.reg_setting = NULL;
+		} else {
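+			/*
+			 * Copy the optional pre-probe register writes
+			 * supplied from user space.
+			 */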
+			id_info = &(slave_info->sensor_id_info);
+			reg_setting = kzalloc(id_info->setting.size *
+				sizeof(struct msm_camera_i2c_reg_array),
+				GFP_KERNEL);
+			if (!reg_setting) {
+				rc = -ENOMEM;
+				goto free_slave_info;
+			}
+			if (copy_from_user(reg_setting,
+				(void __user *)
+				compat_ptr(slave_info32->sensor_id_info.
+				setting.reg_setting),
+				slave_info->sensor_id_info.setting.size *
+				sizeof(struct msm_camera_i2c_reg_array))) {
+				pr_err("%s:%d: sensor id info copy failed\n",
+					__func__, __LINE__);
+				kfree(reg_setting);
+				rc = -EFAULT;
+				goto free_slave_info;
+			}
+
+			slave_info->sensor_id_info.setting.reg_setting =
+				reg_setting;
+		}
 
 		slave_info->slave_addr = slave_info32->slave_addr;
 		slave_info->power_setting_array.size =
@@ -841,6 +890,37 @@
 			rc = -EFAULT;
 			goto free_slave_info;
 		}
+		if (!slave_info->sensor_id_info.setting.size ||
+			slave_info->sensor_id_info.setting.size >
+			I2C_REG_DATA_MAX) {
+			CDBG("%s:No writes needed to probe\n", __func__);
+			slave_info->sensor_id_info.setting.reg_setting = NULL;
+		} else {
+			id_info = &(slave_info->sensor_id_info);
+			reg_setting = kzalloc(id_info->setting.size *
+				sizeof(struct msm_camera_i2c_reg_array),
+				GFP_KERNEL);
+			if (!reg_setting) {
+				rc = -ENOMEM;
+				goto free_slave_info;
+			}
+			if (copy_from_user(reg_setting,
+				(void __user *)
+				slave_info->sensor_id_info.setting.reg_setting,
+				slave_info->sensor_id_info.setting.size *
+				sizeof(struct msm_camera_i2c_reg_array))) {
+				pr_err("%s:%d: sensor id info copy failed\n",
+					__func__, __LINE__);
+				kfree(reg_setting);
+				rc = -EFAULT;
+				goto free_slave_info;
+			}
+
+			slave_info->sensor_id_info.setting.reg_setting =
+				reg_setting;
+		}
 	}
 
 	if (strlen(slave_info->sensor_name) >= MAX_SENSOR_NAME ||
@@ -956,6 +1036,7 @@
 		slave_info->sensor_id_info.sensor_id_reg_addr;
 	camera_info->sensor_id = slave_info->sensor_id_info.sensor_id;
 	camera_info->sensor_id_mask = slave_info->sensor_id_info.sensor_id_mask;
+	camera_info->setting = &(slave_info->sensor_id_info.setting);
 
 	/* Fill CCI master, slave address and CCI default params */
 	if (!s_ctrl->sensor_i2c_client) {
@@ -1116,6 +1197,7 @@
 free_camera_info:
 	kfree(camera_info);
 free_slave_info:
+	kfree(slave_info->sensor_id_info.setting.reg_setting);
 	kfree(slave_info);
 	return rc;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 48ef46c..c0566a3 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -3733,6 +3733,7 @@
 		.name = SDE_ROTATOR_DRV_NAME,
 		.of_match_table = sde_rotator_dt_match,
 		.pm = &sde_rotator_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 9a7d272..e6a4ed30 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -738,7 +738,7 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)|
-			(1UL << V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 	},
@@ -1548,10 +1548,6 @@
 	}
 	case V4L2_CID_MPEG_VIDC_IMG_GRID_DIMENSION:
 	{
-		int i = 0, j = 0;
-		u32 width = 0, height = 0;
-		u32 trows, tcols;
-
 		property_id = HAL_CONFIG_HEIC_GRID_ENABLE;
 		if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
 			dprintk(VIDC_ERR, "Grid is supported only for HEVC\n");
@@ -1566,38 +1562,6 @@
 		grid_enable.grid_enable = ctrl->val;
 		inst->img_grid_dimension = ctrl->val;
 		pdata = &grid_enable;
-
-		/* Update tile info table */
-		width = inst->prop.width[OUTPUT_PORT];
-		height = inst->prop.height[OUTPUT_PORT];
-		tcols = (width + inst->img_grid_dimension - 1) /
-					inst->img_grid_dimension;
-		trows = (height + inst->img_grid_dimension - 1) /
-					inst->img_grid_dimension;
-		inst->tinfo.count = trows * tcols;
-		if (inst->tinfo.count > MAX_HEIC_TILES_COUNT) {
-			dprintk(VIDC_ERR, "Tiles count exceeds maximum\n");
-			rc = -ENOTSUPP;
-			break;
-		}
-
-		dprintk(VIDC_DBG,
-			"Grid dimension %d width %d height %d row %d col %d\n",
-			inst->img_grid_dimension, width, height,
-			trows, tcols);
-
-		for (j = 0; j < trows; ++j) {
-			for (i = 0; i < tcols; ++i) {
-				inst->tinfo.tile_rects[j*tcols+i].left =
-					(i * inst->img_grid_dimension);
-				inst->tinfo.tile_rects[j*tcols+i].top =
-					(j * inst->img_grid_dimension);
-				inst->tinfo.tile_rects[j*tcols+i].width =
-					inst->img_grid_dimension;
-				inst->tinfo.tile_rects[j*tcols+i].height =
-					inst->img_grid_dimension;
-			}
-		}
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -2275,6 +2239,7 @@
 			enable.enable = 0;
 		pdata = &enable;
 		inst->clk_data.low_latency_mode = (bool) enable.enable;
+		msm_dcvs_try_enable(inst);
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 8e32504..651d0a2 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -901,6 +901,51 @@
 	return rc;
 }
 
+static int msm_vidc_create_tile_info_table(struct msm_vidc_inst *inst)
+{
+	int i = 0, j = 0;
+	u32 width = 0, height = 0;
+	u32 trows = 0, tcols = 0;
+
+	/* Don't create table for non-HEIC formats */
+	if (inst->img_grid_dimension <= 0 ||
+		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+		return 0;
+
+	width = inst->prop.width[OUTPUT_PORT];
+	height = inst->prop.height[OUTPUT_PORT];
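+	/* tile rows/columns, rounded up so the grid covers the frame */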
+	tcols = (width + inst->img_grid_dimension - 1) /
+		inst->img_grid_dimension;
+	trows = (height + inst->img_grid_dimension - 1) /
+		inst->img_grid_dimension;
+	inst->tinfo.count = trows * tcols;
+	if (inst->tinfo.count > MAX_HEIC_TILES_COUNT) {
+		dprintk(VIDC_ERR,
+			"Tiles count (%d) exceeds maximum\n",
+			inst->tinfo.count);
+		return -ENOTSUPP;
+	}
+
+	dprintk(VIDC_DBG,
+		"Grid dimension %d width %d height %d row %d col %d\n",
+		inst->img_grid_dimension, width, height,
+		trows, tcols);
+
+	for (j = 0; j < trows; ++j) {
+		for (i = 0; i < tcols; ++i) {
+			inst->tinfo.tile_rects[j*tcols+i].left =
+				(i * inst->img_grid_dimension);
+			inst->tinfo.tile_rects[j*tcols+i].top =
+				(j * inst->img_grid_dimension);
+			inst->tinfo.tile_rects[j*tcols+i].width =
+				inst->img_grid_dimension;
+			inst->tinfo.tile_rects[j*tcols+i].height =
+				inst->img_grid_dimension;
+		}
+	}
+	return 0;
+}
+
 static inline int start_streaming(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -909,6 +954,13 @@
 
 	hdev = inst->core->device;
 
+	/* Create tile info table */
+	rc = msm_vidc_create_tile_info_table(inst);
+	if (rc) {
+		dprintk(VIDC_ERR, "Tile info table was not generated\n");
+		goto fail_start;
+	}
+
 	/* Check if current session is under HW capability */
 	rc = msm_vidc_check_session_supported(inst);
 	if (rc) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 6ae030f..a665978 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -609,6 +609,7 @@
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u64 rate = 0;
 	struct clock_data *dcvs = NULL;
+	u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7;
 
 	core = inst->core;
 	dcvs = &inst->clk_data;
@@ -631,8 +632,19 @@
 
 		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 
-		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
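+		/*
+		 * Start from the 10/7 overhead factor and scale it by
+		 * operating_rate/fps (operating_rate is Q16) when the
+		 * operating rate exceeds fps.
+		 */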
+		operating_rate = inst->clk_data.operating_rate >> 16;
+		if (operating_rate > inst->prop.fps && inst->prop.fps) {
+			vsp_factor_num *= operating_rate;
+			vsp_factor_den *= inst->prop.fps;
+		}
+		/*
+		 * Adjust the factor for the two-core case: the workload is
+		 * not split evenly across the cores, so use 0.65 instead
+		 * of 0.5.
+		 */
+		if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+			vsp_factor_num = vsp_factor_num * 13 / 10;
+			vsp_factor_den *= 2;
+		}
+		vsp_cycles += ((u64)inst->clk_data.bitrate * vsp_factor_num) /
+				vsp_factor_den;
 	} else if (inst->session_type == MSM_VIDC_DECODER) {
 		vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index f9bc79c..d77ea21 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2361,6 +2361,17 @@
 	return 0;
 }
 
+static bool is_heic_encode_session(struct msm_vidc_inst *inst)
+{
+	return inst->session_type == MSM_VIDC_ENCODER &&
+		(get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
+		HAL_VIDEO_CODEC_HEVC) &&
+		(inst->img_grid_dimension > 0);
+}
+
 static bool is_eos_buffer(struct msm_vidc_inst *inst, u32 device_addr)
 {
 	struct eos_buf *temp, *next;
@@ -2406,9 +2417,7 @@
 	}
 
 	empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
-	if ((get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
-		HAL_VIDEO_CODEC_HEVC) &&
-		(inst->img_grid_dimension > 0) &&
+	if (is_heic_encode_session(inst) &&
 		(empty_buf_done->input_tag < inst->tinfo.count - 1)) {
 		dprintk(VIDC_DBG, "Wait for last tile. Current tile no: %d\n",
 		empty_buf_done->input_tag);
@@ -3153,10 +3162,11 @@
 static void msm_vidc_print_running_insts(struct msm_vidc_core *core)
 {
 	struct msm_vidc_inst *temp;
+	int op_rate = 0;
 
 	dprintk(VIDC_ERR, "Running instances:\n");
-	dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%4s\n",
-			"type", "w", "h", "fps", "prop");
+	dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%6s|%4s\n",
+			"type", "w", "h", "fps", "opr", "prop");
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
@@ -3170,13 +3180,21 @@
 			if (msm_comm_turbo_session(temp))
 				strlcat(properties, "T", sizeof(properties));
 
-			dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%4s\n",
+			if (is_realtime_session(temp))
+				strlcat(properties, "R", sizeof(properties));
+
+			if (temp->clk_data.operating_rate)
+				op_rate = temp->clk_data.operating_rate >> 16;
+			else
+				op_rate = temp->prop.fps;
+
+			dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%6d|%4s\n",
 					temp->session_type,
 					max(temp->prop.width[CAPTURE_PORT],
 						temp->prop.width[OUTPUT_PORT]),
 					max(temp->prop.height[CAPTURE_PORT],
 						temp->prop.height[OUTPUT_PORT]),
-					temp->prop.fps, properties);
+					temp->prop.fps, op_rate, properties);
 		}
 	}
 	mutex_unlock(&core->lock);
@@ -4424,9 +4442,7 @@
 		for (c = 0; c < etbs.count; ++c) {
 			struct vidc_frame_data *frame_data = &etbs.data[c];
 
-			if (get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
-				HAL_VIDEO_CODEC_HEVC &&
-				(inst->img_grid_dimension > 0)) {
+			if (is_heic_encode_session(inst)) {
 				rc = msm_comm_qbuf_heic_tiles(inst, frame_data);
 				if (rc) {
 					dprintk(VIDC_ERR,
@@ -5504,6 +5520,11 @@
 	u32 input_height, input_width, output_height, output_width;
 	u32 rotation;
 
+	if (is_heic_encode_session(inst)) {
+		dprintk(VIDC_DBG, "Skip downscale check for HEIC\n");
+		return 0;
+	}
+
 	input_height = inst->prop.height[OUTPUT_PORT];
 	input_width = inst->prop.width[OUTPUT_PORT];
 	output_height = inst->prop.height[CAPTURE_PORT];
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 530fe3a..1dc6723 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -25,6 +25,7 @@
 enum clock_properties {
 	CLOCK_PROP_HAS_SCALING = 1 << 0,
 	CLOCK_PROP_HAS_MEM_RETENTION    = 1 << 1,
+	CLOCK_PROP_DISABLE_MEMCORE_ONLY = 1 << 2,
 };
 
 #define PERF_GOV "performance"
@@ -666,6 +667,11 @@
 		else
 			vc->has_mem_retention = false;
 
+		if (clock_props[c] & CLOCK_PROP_DISABLE_MEMCORE_ONLY)
+			vc->disable_memcore_only = true;
+		else
+			vc->disable_memcore_only = false;
+
 		dprintk(VIDC_DBG, "Found clock %s: scale-able = %s\n", vc->name,
 			vc->count ? "yes" : "no");
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 23e33fe..06d8fa5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -90,6 +90,7 @@
 	u32 count;
 	bool has_scaling;
 	bool has_mem_retention;
+	bool disable_memcore_only;
 };
 
 struct clock_set {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index ade04e3..79ed798 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1097,6 +1097,8 @@
 	int rc = 0;
 
 	venus_hfi_for_each_clock(device, cl) {
+		if (cl->disable_memcore_only)
+			continue;
 		if (cl->has_scaling) {/* has_scaling */
 			device->clk_freq = freq;
 			rc = clk_set_rate(cl->clk, freq);
@@ -3362,6 +3364,8 @@
 	}
 
 	venus_hfi_for_each_clock_reverse(device, cl) {
+		if (cl->disable_memcore_only)
+			continue;
 		dprintk(VIDC_DBG, "Clock: %s disable and unprepare\n",
 				cl->name);
 		rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_PERIPH);
@@ -3391,6 +3395,11 @@
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
+		/* MEM CORE is ON by default. Unset it for unused clocks. */
+		if (cl->disable_memcore_only) {
+			clk_set_flags(cl->clk, CLKFLAG_NORETAIN_MEM);
+			continue;
+		}
 		/*
 		 * For the clocks we control, set the rate prior to preparing
 		 * them.  Since we don't really have a load at this point, scale
@@ -3428,6 +3437,8 @@
 
 fail_clk_enable:
 	venus_hfi_for_each_clock_reverse_continue(device, cl, c) {
+		if (cl->disable_memcore_only)
+			continue;
 		dprintk(VIDC_ERR, "Clock: %s disable and unprepare\n",
 			cl->name);
 		clk_disable_unprepare(cl->clk);
@@ -3521,7 +3532,6 @@
 		devfreq_suspend_device(bus->devfreq);
 	}
 
-	device->bus_vote = DEFAULT_BUS_VOTE;
 	return 0;
 
 err_add_dev:
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index f4ea86b..dd94a88 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -492,6 +492,7 @@
 
 struct hfi_heic_frame_quality {
 	u32 frame_quality;
+	u32 reserved[3];
 };
 
 struct hfi_heic_grid_enable {
diff --git a/drivers/media/platform/msm/vidc_3x/hfi_packetization.c b/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
index b15baaa..1de5bd1 100644
--- a/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
@@ -658,9 +658,12 @@
 	case HAL_EXTRADATA_STREAM_USERDATA:
 		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
 		break;
-	case HAL_EXTRADATA_FRAME_QP:
+	case HAL_EXTRADATA_DEC_FRAME_QP:
 		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA;
 		break;
+	case HAL_EXTRADATA_ENC_FRAME_QP:
+		ret = HFI_PROPERTY_PARAM_VENC_FRAME_QP_EXTRADATA;
+		break;
 	case HAL_EXTRADATA_FRAME_BITS_INFO:
 		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA;
 		break;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_venc.c b/drivers/media/platform/msm/vidc_3x/msm_venc.c
index d129dc2..ef6e360 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_venc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_venc.c
@@ -823,7 +823,7 @@
 		.name = "Extradata Type",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP,
 		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
 		.menu_skip_mask = ~(
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -846,7 +846,8 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO) |
+			(1ULL << V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 	},
@@ -1564,6 +1565,7 @@
 		case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
 		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
 		case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+		case V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP:
 		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
 			inst->fmts[CAPTURE_PORT].num_planes = 2;
 		default:
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
index bd58117..502a5c7 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
@@ -77,9 +77,11 @@
 	"Extradata output crop",
 	"Extradata display colour SEI",
 	"Extradata light level SEI",
+	"Extradata PQ Info",
 	"Extradata display VUI",
 	"Extradata vpx color space",
-	"Extradata PQ Info",
+	"Extradata UBWC CR stats info",
+	"Extradata enc frame QP"
 };
 
 struct getprop_buf {
@@ -4727,7 +4729,10 @@
 		ret = HAL_EXTRADATA_STREAM_USERDATA;
 		break;
 	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP:
-		ret = HAL_EXTRADATA_FRAME_QP;
+		ret = HAL_EXTRADATA_DEC_FRAME_QP;
+		break;
+	case V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP:
+		ret = HAL_EXTRADATA_ENC_FRAME_QP;
 		break;
 	case V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO:
 		ret = HAL_EXTRADATA_FRAME_BITS_INFO;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
index 93368f6..c7eb5f1 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
@@ -316,7 +316,7 @@
 	s32 maximum;
 	s32 default_value;
 	u32 step;
-	u32 menu_skip_mask;
+	u64 menu_skip_mask;
 	u32 flags;
 	const char * const *qmenu;
 };
diff --git a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
index 1a25a58..875db09 100644
--- a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
@@ -112,7 +112,8 @@
 	HAL_EXTRADATA_ASPECT_RATIO,
 	HAL_EXTRADATA_MPEG2_SEQDISP,
 	HAL_EXTRADATA_STREAM_USERDATA,
-	HAL_EXTRADATA_FRAME_QP,
+	HAL_EXTRADATA_DEC_FRAME_QP,
+	HAL_EXTRADATA_ENC_FRAME_QP,
 	HAL_EXTRADATA_FRAME_BITS_INFO,
 	HAL_EXTRADATA_INPUT_CROP,
 	HAL_EXTRADATA_DIGITAL_ZOOM,
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 0413a86..5c9db09 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1256,16 +1256,17 @@
 {
 	const struct s3c_camif_variant *variant = camif->variant;
 	const struct vp_pix_limits *pix_lim;
-	int i = ARRAY_SIZE(camif_mbus_formats);
+	unsigned int i;
 
 	/* FIXME: constraints against codec or preview path ? */
 	pix_lim = &variant->vp_pix_limits[VP_CODEC];
 
-	while (i-- >= 0)
+	for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
 		if (camif_mbus_formats[i] == mf->code)
 			break;
 
-	mf->code = camif_mbus_formats[i];
+	if (i == ARRAY_SIZE(camif_mbus_formats))
+		mf->code = camif_mbus_formats[0];
 
 	if (pad == CAMIF_SD_PAD_SINK) {
 		v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index aceb38d..b1c3725 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1167,6 +1167,7 @@
 		v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
 		v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
 		v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
+		dev->radio_rx_dev.device_caps = dev->radio_rx_caps;
 		break;
 	case V4L2_CID_RDS_RECEPTION:
 		dev->radio_rx_rds_enabled = ctrl->val;
@@ -1241,6 +1242,7 @@
 		dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
 		if (!dev->radio_tx_rds_controls)
 			dev->radio_tx_caps |= V4L2_CAP_READWRITE;
+		dev->radio_tx_dev.device_caps = dev->radio_tx_caps;
 		break;
 	case V4L2_CID_RDS_TX_PTY:
 		if (dev->radio_rx_rds_controls)
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index d148463..6bf48a7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -189,7 +189,7 @@
    USB 2.0 spec says bulk packet size is always 512 bytes
  */
 #define EM28XX_BULK_PACKET_MULTIPLIER 384
-#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
+#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
 
 #define EM28XX_INTERLACED_DEFAULT 1
 
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b01..2dcc8d0 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
 static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
 {
 	int i = 0;
-	int tmpval = 0;
+	u8 tmpval = 0;
 
 	if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
 		return 1;
@@ -253,7 +253,7 @@
 static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
 {
 	int i = 0;
-	int tmpval = 0;
+	u8 tmpval = 0;
 
 	if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
 		return 1;
@@ -274,7 +274,7 @@
 	if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
 		return 1;
 
-	*val = (u8) tmpval;
+	*val = tmpval;
 	return 0;
 }
 
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae..1c48f2f 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@
 		return 0;
 }
 
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
 {
 	struct usb_device *udev = dev->udev;
 	unsigned char *buf;
@@ -163,7 +163,7 @@
 			sizeof(u8),
 			500);
 	if (ret >= 0)
-		memcpy(value, buf, sizeof(u8));
+		*value = *buf;
 
 	kfree(buf);
 	return ret;
@@ -171,9 +171,10 @@
 
 static int stk_start_stream(struct stk_camera *dev)
 {
-	int value;
+	u8 value;
 	int i, ret;
-	int value_116, value_117;
+	u8 value_116, value_117;
+
 
 	if (!is_present(dev))
 		return -ENODEV;
@@ -213,7 +214,7 @@
 
 static int stk_stop_stream(struct stk_camera *dev)
 {
-	int value;
+	u8 value;
 	int i;
 	if (is_present(dev)) {
 		stk_camera_read_reg(dev, 0x0100, &value);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d..92bb48e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@
 #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
 
 int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
 
 int stk_sensor_init(struct stk_camera *);
 int stk_sensor_configure(struct stk_camera *);
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index ab3227b..760cbf2 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -104,7 +104,7 @@
 			if (nums[i-1] + 1 != nums[i])
 				goto fail_map;
 		buf->vaddr = (__force void *)
-				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
+			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
 	} else {
 		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
 					PAGE_KERNEL);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 02b5f69..14cf6df 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2698,6 +2698,8 @@
 				__FILE__, __LINE__, iocnum);
 		return -ENODEV;
 	}
+	if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+		return -EINVAL;
 	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
 	    ioc->name));
 
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667..00dff9b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@
 	.cmd_per_lun			= 7,
 	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mptscsih_host_attrs,
+	.no_write_same			= 1,
 };
 
 static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3f4c39..a233173 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -825,4 +825,5 @@
 source "drivers/misc/genwqe/Kconfig"
 source "drivers/misc/echo/Kconfig"
 source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/fpr_FingerprintCard/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f1c9467..8e5d0f6 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,6 +57,7 @@
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_PANEL)             += panel.o
 obj-$(CONFIG_QPNP_MISC) 	+= qpnp-misc.o
+obj-$(CONFIG_FPR_FPC) 	+= fpr_FingerprintCard/
 obj-$(CONFIG_MEMORY_STATE_TIME)	+= memory_state_time.o
 
 obj-$(CONFIG_UID_SYS_STATS)	+= uid_sys_stats.o
diff --git a/drivers/misc/fpr_FingerprintCard/Kconfig b/drivers/misc/fpr_FingerprintCard/Kconfig
new file mode 100644
index 0000000..c9599e6
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/Kconfig
@@ -0,0 +1,10 @@
+#
+# FingerprintCard fingerprint driver
+#
+menu "FingerprintCard fingerprint driver"
+config FPR_FPC
+       default n
+       tristate "FPC_BTP fingerprint sensor support"
+       depends on SPI_MASTER
+
+endmenu
diff --git a/drivers/misc/fpr_FingerprintCard/Makefile b/drivers/misc/fpr_FingerprintCard/Makefile
new file mode 100644
index 0000000..96681eb
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/Makefile
@@ -0,0 +1,5 @@
+# Makefile for FingerprintCard fingerprint driver
+
+fpc1020-objs := fpc1020_platform_tee.o
+obj-$(CONFIG_FPR_FPC) += fpc1020.o
+
diff --git a/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
new file mode 100644
index 0000000..a6ed374
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
@@ -0,0 +1,683 @@
+/*
+ * FPC1020 Fingerprint sensor device driver
+ *
+ * This driver controls the platform resources that the FPC fingerprint
+ * sensor needs to operate. Its main tasks are probing the sensor to check
+ * that it is actually connected, reporting this to the kernel, and enabling
+ * and disabling regulators as well as controlling GPIOs such as the sensor
+ * reset line and the sensor IRQ line.
+ *
+ * The driver exposes most of its available functionality in sysfs, which
+ * enables dynamic control of these features from e.g. a user space process.
+ *
+ * The sensor's IRQ events are pushed to the kernel's event handling system
+ * and are exposed in the driver's event node.
+ *
+ * This driver does NOT send any commands to the sensor; it only controls the
+ * electrical parts.
+ *
+ *
+ * Copyright (c) 2015 Fingerprint Cards AB <tech@fingerprints.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License Version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+
+
+#define FPC_TTW_HOLD_TIME		1000
+#define RESET_LOW_SLEEP_MIN_US		5000
+#define RESET_LOW_SLEEP_MAX_US		(RESET_LOW_SLEEP_MIN_US + 100)
+#define RESET_HIGH_SLEEP1_MIN_US	100
+#define RESET_HIGH_SLEEP1_MAX_US	(RESET_HIGH_SLEEP1_MIN_US + 100)
+#define RESET_HIGH_SLEEP2_MIN_US	5000
+#define RESET_HIGH_SLEEP2_MAX_US	(RESET_HIGH_SLEEP2_MIN_US + 100)
+#define PWR_ON_SLEEP_MIN_US		100
+#define PWR_ON_SLEEP_MAX_US		(PWR_ON_SLEEP_MIN_US + 900)
+#define NUM_PARAMS_REG_ENABLE_SET	2
+
+#define RELEASE_WAKELOCK_W_V		"release_wakelock_with_verification"
+#define RELEASE_WAKELOCK		"release_wakelock"
+#define START_IRQS_RECEIVED_CNT		"start_irqs_received_counter"
+
+static const char * const pctl_names[] = {
+	"fpc1020_reset_reset",
+	"fpc1020_reset_active",
+	"fpc1020_irq_active",
+};
+
+struct vreg_config {
+	char *name;
+	unsigned long vmin;
+	unsigned long vmax;
+	int ua_load;
+};
+
+static const struct vreg_config vreg_conf[] = {
+	{ "vdd_ana", 1800000UL, 1800000UL, 6000, },
+	{ "vcc_spi", 1800000UL, 1800000UL, 10, },
+	{ "vdd_io", 1800000UL, 1800000UL, 6000, },
+};
+
+struct fpc1020_data {
+	struct device *dev;
+	struct pinctrl *fingerprint_pinctrl;
+	struct pinctrl_state *pinctrl_state[ARRAY_SIZE(pctl_names)];
+	struct regulator *vreg[ARRAY_SIZE(vreg_conf)];
+	struct wakeup_source ttw_wl;
+	struct mutex lock; /* To set/get exported values in sysfs */
+	int irq_gpio;
+	int rst_gpio;
+	int nbr_irqs_received;
+	int nbr_irqs_received_counter_start;
+	bool prepared;
+	atomic_t wakeup_enabled; /* Used both in ISR and non-ISR */
+};
+
+static int vreg_setup(struct fpc1020_data *fpc1020, const char *name,
+	bool enable)
+{
+	size_t i;
+	int rc;
+	struct regulator *vreg;
+	struct device *dev = fpc1020->dev;
+
+	for (i = 0; i < ARRAY_SIZE(vreg_conf); i++) {
+		const char *n = vreg_conf[i].name;
+
+		if (!memcmp(n, name, strlen(n)))
+			goto found;
+	}
+
+	dev_err(dev, "Regulator %s not found\n", name);
+
+	return -EINVAL;
+
+found:
+	vreg = fpc1020->vreg[i];
+	if (enable) {
+		if (!vreg) {
+			vreg = devm_regulator_get(dev, name);
+			if (IS_ERR_OR_NULL(vreg)) {
+				dev_err(dev, "Unable to get %s\n", name);
+				return PTR_ERR(vreg);
+			}
+		}
+
+		if (regulator_count_voltages(vreg) > 0) {
+			rc = regulator_set_voltage(vreg, vreg_conf[i].vmin,
+					vreg_conf[i].vmax);
+			if (rc)
+				dev_err(dev,
+					"Unable to set voltage on %s, %d\n",
+					name, rc);
+		}
+
+		rc = regulator_set_load(vreg, vreg_conf[i].ua_load);
+		if (rc < 0)
+			dev_err(dev, "Unable to set current on %s, %d\n",
+					name, rc);
+
+		rc = regulator_enable(vreg);
+		if (rc) {
+			dev_err(dev, "error enabling %s: %d\n", name, rc);
+			vreg = NULL;
+		}
+		fpc1020->vreg[i] = vreg;
+	} else {
+		if (vreg) {
+			if (regulator_is_enabled(vreg)) {
+				regulator_disable(vreg);
+				dev_dbg(dev, "disabled %s\n", name);
+			}
+			fpc1020->vreg[i] = NULL;
+		}
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * sysfs node for controlling clocks.
+ *
+ * This is disabled in the platform variant of this driver but kept for
+ * backwards compatibility. Writing it only emits a debug message stating
+ * that it is disabled.
+ */
+static ssize_t clk_enable_set(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	dev_dbg(dev,
+		"clk_enable sysfs node not enabled in platform driver\n");
+
+	return count;
+}
+static DEVICE_ATTR(clk_enable, 0200, NULL, clk_enable_set);
+
+/*
+ * Tries to select the set of pins (GPIOs) defined in the pin control node of
+ * the device tree named @p name.
+ *
+ * The node can contain several pins (e.g. GPIOs) that are controlled when it
+ * is selected. The node may activate or deactivate the pins it contains; the
+ * action is defined in the device tree node itself and not here. The states
+ * used internally are fetched at probe time.
+ *
+ * @see pctl_names
+ * @see fpc1020_probe
+ */
+static int select_pin_ctl(struct fpc1020_data *fpc1020, const char *name)
+{
+	size_t i;
+	int rc;
+	struct device *dev = fpc1020->dev;
+
+	for (i = 0; i < ARRAY_SIZE(pctl_names); i++) {
+		const char *n = pctl_names[i];
+
+		if (!memcmp(n, name, strlen(n))) {
+			rc = pinctrl_select_state(fpc1020->fingerprint_pinctrl,
+					fpc1020->pinctrl_state[i]);
+			if (rc)
+				dev_err(dev, "cannot select '%s'\n", name);
+			else
+				dev_dbg(dev, "Selected '%s'\n", name);
+			goto exit;
+		}
+	}
+
+	rc = -EINVAL;
+	dev_err(dev, "%s:'%s' not found\n", __func__, name);
+
+exit:
+	return rc;
+}
+
+static ssize_t pinctl_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	int rc;
+
+	mutex_lock(&fpc1020->lock);
+	rc = select_pin_ctl(fpc1020, buf);
+	mutex_unlock(&fpc1020->lock);
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(pinctl_set, 0200, NULL, pinctl_set);
+
+static ssize_t regulator_enable_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	char op;
+	char name[16];
+	int rc;
+	bool enable;
+
+	if (sscanf(buf, "%15[^,],%c", name, &op) != NUM_PARAMS_REG_ENABLE_SET)
+		return -EINVAL;
+	if (op == 'e')
+		enable = true;
+	else if (op == 'd')
+		enable = false;
+	else
+		return -EINVAL;
+
+	mutex_lock(&fpc1020->lock);
+	rc = vreg_setup(fpc1020, name, enable);
+	mutex_unlock(&fpc1020->lock);
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(regulator_enable, 0200, NULL, regulator_enable_set);
+
+static int hw_reset(struct fpc1020_data *fpc1020)
+{
+	int irq_gpio;
+	int rc;
+
+	irq_gpio = gpio_get_value(fpc1020->irq_gpio);
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_active");
+
+	if (rc)
+		goto exit;
+
+	usleep_range(RESET_HIGH_SLEEP1_MIN_US, RESET_HIGH_SLEEP1_MAX_US);
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+	if (rc)
+		goto exit;
+	usleep_range(RESET_LOW_SLEEP_MIN_US, RESET_LOW_SLEEP_MAX_US);
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_active");
+	if (rc)
+		goto exit;
+	usleep_range(RESET_HIGH_SLEEP2_MIN_US, RESET_HIGH_SLEEP2_MAX_US);
+
+	irq_gpio = gpio_get_value(fpc1020->irq_gpio);
+
+exit:
+	return rc;
+}
+
+static ssize_t hw_reset_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc = -EINVAL;
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+	if (!memcmp(buf, "reset", strlen("reset"))) {
+		mutex_lock(&fpc1020->lock);
+		rc = hw_reset(fpc1020);
+		mutex_unlock(&fpc1020->lock);
+	} else {
+		return rc;
+	}
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(hw_reset, 0200, NULL, hw_reset_set);
+
+/*
+ * Sets up GPIOs and regulators to correctly initialize the touch sensor so
+ * that it is ready for work.
+ *
+ * In the order required by the sensor spec, this function enables/disables
+ * the regulators and toggles the reset line to put the sensor into the
+ * correct power-on or power-off state, electrically speaking.
+ *
+ * @see  device_prepare_set
+ * @note This function does not send any commands to the sensor; it only
+ *       controls it electrically.
+ */
+static int device_prepare(struct fpc1020_data *fpc1020, bool enable)
+{
+	int rc = 0;
+
+	mutex_lock(&fpc1020->lock);
+	if (enable && !fpc1020->prepared) {
+		fpc1020->prepared = true;
+		select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+		rc = vreg_setup(fpc1020, "vcc_spi", true);
+		if (rc)
+			goto exit;
+
+		rc = vreg_setup(fpc1020, "vdd_io", true);
+		if (rc)
+			goto exit_1;
+
+		rc = vreg_setup(fpc1020, "vdd_ana", true);
+		if (rc)
+			goto exit_2;
+
+		usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US);
+
+		(void)select_pin_ctl(fpc1020, "fpc1020_reset_active");
+	} else if (!enable && fpc1020->prepared) {
+		rc = 0;
+		(void)select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+		usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US);
+
+		(void)vreg_setup(fpc1020, "vdd_ana", false);
+exit_2:
+		(void)vreg_setup(fpc1020, "vdd_io", false);
+exit_1:
+		(void)vreg_setup(fpc1020, "vcc_spi", false);
+exit:
+		fpc1020->prepared = false;
+	}
+
+	mutex_unlock(&fpc1020->lock);
+
+	return rc;
+}
+
+/*
+ * sysfs node to enable/disable (power up/power down) the touch sensor
+ *
+ * @see device_prepare
+ */
+static ssize_t device_prepare_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+	if (!memcmp(buf, "enable", strlen("enable")))
+		rc = device_prepare(fpc1020, true);
+	else if (!memcmp(buf, "disable", strlen("disable")))
+		rc = device_prepare(fpc1020, false);
+	else
+		return -EINVAL;
+
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(device_prepare, 0200, NULL, device_prepare_set);
+
+/**
+ * sysfs node for controlling whether the driver is allowed
+ * to wake up the platform on interrupt.
+ */
+static ssize_t wakeup_enable_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	mutex_lock(&fpc1020->lock);
+	if (!memcmp(buf, "enable", strlen("enable")))
+		atomic_set(&fpc1020->wakeup_enabled, 1);
+	else if (!memcmp(buf, "disable", strlen("disable")))
+		atomic_set(&fpc1020->wakeup_enabled, 0);
+	else
+		ret = -EINVAL;
+	mutex_unlock(&fpc1020->lock);
+
+	return ret;
+}
+static DEVICE_ATTR(wakeup_enable, 0200, NULL, wakeup_enable_set);
+
+
+/*
+ * sysfs node for controlling the wakelock.
+ */
+static ssize_t handle_wakelock_cmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct  fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	mutex_lock(&fpc1020->lock);
+	if (!memcmp(buf, RELEASE_WAKELOCK_W_V,
+		min(count, strlen(RELEASE_WAKELOCK_W_V)))) {
+		if (fpc1020->nbr_irqs_received_counter_start ==
+				fpc1020->nbr_irqs_received) {
+			__pm_relax(&fpc1020->ttw_wl);
+		} else {
+			dev_dbg(dev, "Ignore releasing of wakelock %d != %d",
+				fpc1020->nbr_irqs_received_counter_start,
+				fpc1020->nbr_irqs_received);
+		}
+	} else if (!memcmp(buf, RELEASE_WAKELOCK, min(count,
+				strlen(RELEASE_WAKELOCK)))) {
+		__pm_relax(&fpc1020->ttw_wl);
+	} else if (!memcmp(buf, START_IRQS_RECEIVED_CNT,
+			min(count, strlen(START_IRQS_RECEIVED_CNT)))) {
+		fpc1020->nbr_irqs_received_counter_start =
+		fpc1020->nbr_irqs_received;
+	} else
+		ret = -EINVAL;
+	mutex_unlock(&fpc1020->lock);
+
+	return ret;
+}
+static DEVICE_ATTR(handle_wakelock, 0200, NULL, handle_wakelock_cmd);
+
+/*
+ * sysfs node to check the interrupt status of the sensor; the interrupt
+ * handler should perform sysfs_notify() to allow userland to poll the node.
+ */
+static ssize_t irq_get(struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+	int irq = gpio_get_value(fpc1020->irq_gpio);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", irq);
+}
+
+/*
+ * Writing to the irq node just emits a debug message and returns
+ * success; it is used for latency measurement.
+ */
+static ssize_t irq_ack(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+	dev_dbg(fpc1020->dev, "%s\n", __func__);
+
+	return count;
+}
+static DEVICE_ATTR(irq, 0600 | 0200, irq_get, irq_ack);
+
+static struct attribute *attributes[] = {
+	&dev_attr_pinctl_set.attr,
+	&dev_attr_device_prepare.attr,
+	&dev_attr_regulator_enable.attr,
+	&dev_attr_hw_reset.attr,
+	&dev_attr_wakeup_enable.attr,
+	&dev_attr_handle_wakelock.attr,
+	&dev_attr_clk_enable.attr,
+	&dev_attr_irq.attr,
+	NULL
+};
+
+static const struct attribute_group attribute_group = {
+	.attrs = attributes,
+};
+
+static irqreturn_t fpc1020_irq_handler(int irq, void *handle)
+{
+	struct fpc1020_data *fpc1020 = handle;
+
+	pr_info("fpc1020 irq handler: %s\n", __func__);
+	mutex_lock(&fpc1020->lock);
+	if (atomic_read(&fpc1020->wakeup_enabled)) {
+		fpc1020->nbr_irqs_received++;
+		__pm_wakeup_event(&fpc1020->ttw_wl,
+					msecs_to_jiffies(FPC_TTW_HOLD_TIME));
+	}
+	mutex_unlock(&fpc1020->lock);
+
+	sysfs_notify(&fpc1020->dev->kobj, NULL, dev_attr_irq.attr.name);
+
+	return IRQ_HANDLED;
+}
+
+static int fpc1020_request_named_gpio(struct fpc1020_data *fpc1020,
+	const char *label, int *gpio)
+{
+	struct device *dev = fpc1020->dev;
+	struct device_node *np = dev->of_node;
+	int rc;
+
+	rc = of_get_named_gpio(np, label, 0);
+
+	if (rc < 0) {
+		dev_err(dev, "failed to get '%s'\n", label);
+		return rc;
+	}
+	*gpio = rc;
+
+	rc = devm_gpio_request(dev, *gpio, label);
+	if (rc) {
+		dev_err(dev, "failed to request gpio %d\n", *gpio);
+		return rc;
+	}
+	dev_dbg(dev, "%s %d\n", label, *gpio);
+
+	return 0;
+}
+
+static int fpc1020_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int rc = 0;
+	size_t i;
+	int irqf;
+	struct fpc1020_data *fpc1020 = devm_kzalloc(dev, sizeof(*fpc1020),
+			GFP_KERNEL);
+	if (!fpc1020) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+
+	fpc1020->dev = dev;
+	platform_set_drvdata(pdev, fpc1020);
+
+	rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_irq",
+			&fpc1020->irq_gpio);
+	if (rc)
+		goto exit;
+	rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_rst",
+			&fpc1020->rst_gpio);
+	if (rc)
+		goto exit;
+
+	fpc1020->fingerprint_pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(fpc1020->fingerprint_pinctrl)) {
+		if (PTR_ERR(fpc1020->fingerprint_pinctrl) == -EPROBE_DEFER) {
+			dev_info(dev, "pinctrl not ready\n");
+			rc = -EPROBE_DEFER;
+			goto exit;
+		}
+		dev_err(dev, "Target does not use pinctrl\n");
+		fpc1020->fingerprint_pinctrl = NULL;
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(pctl_names); i++) {
+		const char *n = pctl_names[i];
+		struct pinctrl_state *state =
+			pinctrl_lookup_state(fpc1020->fingerprint_pinctrl, n);
+		if (IS_ERR(state)) {
+			dev_err(dev, "cannot find '%s'\n", n);
+			rc = -EINVAL;
+			goto exit;
+		}
+		dev_info(dev, "found pin control %s\n", n);
+		fpc1020->pinctrl_state[i] = state;
+	}
+
+	rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+	if (rc)
+		goto exit;
+	rc = select_pin_ctl(fpc1020, "fpc1020_irq_active");
+	if (rc)
+		goto exit;
+
+	atomic_set(&fpc1020->wakeup_enabled, 0);
+
+	irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+	if (of_property_read_bool(dev->of_node, "fpc,enable-wakeup")) {
+		irqf |= IRQF_NO_SUSPEND;
+		device_init_wakeup(dev, 1);
+	}
+
+	mutex_init(&fpc1020->lock);
+	rc = devm_request_threaded_irq(dev, gpio_to_irq(fpc1020->irq_gpio),
+			NULL, fpc1020_irq_handler, irqf,
+			dev_name(dev), fpc1020);
+	if (rc) {
+		dev_err(dev, "could not request irq %d\n",
+				gpio_to_irq(fpc1020->irq_gpio));
+		goto exit;
+	}
+
+	dev_info(dev, "requested irq %d\n", gpio_to_irq(fpc1020->irq_gpio));
+
+	/* Request that the interrupt should be wakeable */
+	enable_irq_wake(gpio_to_irq(fpc1020->irq_gpio));
+
+	wakeup_source_init(&fpc1020->ttw_wl, "fpc_ttw_wl");
+
+	rc = sysfs_create_group(&dev->kobj, &attribute_group);
+	if (rc) {
+		dev_err(dev, "could not create sysfs\n");
+		goto exit;
+	}
+
+	if (of_property_read_bool(dev->of_node, "fpc,enable-on-boot")) {
+		dev_info(dev, "Enabling hardware\n");
+		(void)device_prepare(fpc1020, true);
+	}
+
+	rc = hw_reset(fpc1020);
+
+	dev_info(dev, "%s: ok\n", __func__);
+
+exit:
+	return rc;
+}
+
+static int fpc1020_remove(struct platform_device *pdev)
+{
+	struct fpc1020_data *fpc1020 = platform_get_drvdata(pdev);
+
+	sysfs_remove_group(&pdev->dev.kobj, &attribute_group);
+	mutex_destroy(&fpc1020->lock);
+	wakeup_source_trash(&fpc1020->ttw_wl);
+	(void)vreg_setup(fpc1020, "vdd_ana", false);
+	(void)vreg_setup(fpc1020, "vdd_io", false);
+	(void)vreg_setup(fpc1020, "vcc_spi", false);
+	dev_info(&pdev->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static const struct of_device_id fpc1020_of_match[] = {
+	{ .compatible = "fpc,fpc1020", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fpc1020_of_match);
+
+static struct platform_driver fpc1020_driver = {
+	.driver = {
+		.name	= "fpc1020",
+		.owner	= THIS_MODULE,
+		.of_match_table = fpc1020_of_match,
+	},
+	.probe	= fpc1020_probe,
+	.remove	= fpc1020_remove,
+};
+
+static int __init fpc1020_init(void)
+{
+	int rc = platform_driver_register(&fpc1020_driver);
+
+	if (!rc)
+		pr_info("%s OK\n", __func__);
+	else
+		pr_err("%s %d\n", __func__, rc);
+
+	return rc;
+}
+
+static void __exit fpc1020_exit(void)
+{
+	pr_info("%s\n", __func__);
+	platform_driver_unregister(&fpc1020_driver);
+}
+
+module_init(fpc1020_init);
+module_exit(fpc1020_exit);
+
+
+MODULE_DESCRIPTION("FPC1020 Fingerprint sensor device driver.");
+MODULE_LICENSE("GPL v2");
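
The irq attribute above is designed to be polled from user space: fpc1020_irq_handler() calls sysfs_notify() and irq_get() reports the current GPIO level. A minimal user-space sketch of that polling pattern follows; the sysfs path is an assumption and depends on how the platform device is actually named on a given target.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; the real one depends on the platform device name. */
	const char *path = "/sys/devices/platform/fpc1020/irq";
	char buf[8];
	int i, fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Prime the attribute; sysfs_notify() then shows up as POLLPRI. */
	read(fd, buf, sizeof(buf));
	for (i = 0; i < 3; i++) {	/* wait for a few interrupts, then exit */
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		if (poll(&pfd, 1, -1) > 0) {
			ssize_t n;

			lseek(fd, 0, SEEK_SET);
			n = read(fd, buf, sizeof(buf) - 1);
			if (n > 0) {
				buf[n] = '\0';
				printf("irq gpio level: %s", buf);
			}
		}
	}
	close(fd);
	return 0;
}
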
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2b31ed3..07ae56f 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -4061,8 +4061,11 @@
 	int retry = 0;
 
 	do {
-		if (retry++)
+		if (retry++) {
+			mutex_unlock(&app_access_lock);
 			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
+			mutex_lock(&app_access_lock);
+		}
 		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
 			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
 	} while (IS_ERR_OR_NULL(ihandle) &&
@@ -4212,8 +4215,12 @@
 	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
 			&resp, sizeof(resp));
 	if (ret) {
-		pr_err("scm_call to load failed : ret %d\n", ret);
-		ret = -EIO;
+		pr_err("scm_call to load failed : ret %d, result %x\n",
+			ret, resp.result);
+		if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
+			ret = -EEXIST;
+		else
+			ret = -EIO;
 		goto exit_disable_clk_vote;
 	}
 
@@ -4465,6 +4472,7 @@
 	}
 	mutex_lock(&app_access_lock);
 
+recheck:
 	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
 	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
 	ret = __qseecom_check_app_exists(app_ireq, &app_id);
@@ -4494,7 +4502,10 @@
 		pr_debug("%s: Loading app for the first time'\n",
 				qseecom.pdev->init_name);
 		ret = __qseecom_load_fw(data, app_name, &app_id);
-		if (ret < 0)
+		if (ret == -EEXIST) {
+			pr_err("recheck if TA %s is loaded\n", app_name);
+			goto recheck;
+		} else if (ret < 0)
 			goto exit_ion_free;
 	}
 	data->client.app_id = app_id;
@@ -6979,19 +6990,6 @@
 		break;
 	}
 
-	case QSEECOM_IOCTL_SET_ENCDEC_INFO: {
-		struct qseecom_encdec_conf_t conf;
-
-		ret = copy_from_user(&conf, argp, sizeof(conf));
-		if (ret) {
-			pr_err("copy_from_user failed\n");
-			return -EFAULT;
-		}
-		ret = qcom_ice_set_fde_conf(conf.start_sector, conf.fs_size,
-					conf.index, conf.mode);
-		break;
-	}
-
 	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
 		if ((data->listener.id == 0) ||
 			(data->type != QSEECOM_LISTENER_SERVICE)) {
@@ -8780,6 +8778,7 @@
 static int qseecom_remove(struct platform_device *pdev)
 {
 	struct qseecom_registered_kclient_list *kclient = NULL;
+	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
 	unsigned long flags = 0;
 	int ret = 0;
 	int i;
@@ -8789,10 +8788,8 @@
 	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
 	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
 
-	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
-								list) {
-		if (!kclient)
-			goto exit_irqrestore;
+	list_for_each_entry_safe(kclient, kclient_tmp,
+		&qseecom.registered_kclient_list_head, list) {
 
 		/* Break the loop if client handle is NULL */
 		if (!kclient->handle)
@@ -8816,7 +8813,7 @@
 	kzfree(kclient->handle);
 exit_free_kclient:
 	kzfree(kclient);
-exit_irqrestore:
+
 	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
 
 	if (qseecom.qseos_version > QSEEE_VERSION_00)
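
The first hunk above releases app_access_lock around the msleep() back-off so other qseecom callers are not blocked while the ION allocation is being retried. A minimal user-space sketch of the same drop-the-lock-while-sleeping pattern, using a pthread mutex and illustrative helper names:

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for an allocation that may transiently fail. */
static bool try_alloc(void)
{
	return true;
}

/* Same shape as the retry loop above: drop the lock across the sleep so
 * other callers are not stalled for the whole back-off period. */
static bool alloc_with_retry(int max_retries, int delay_ms)
{
	bool ok;
	int retry = 0;

	do {
		if (retry++) {
			pthread_mutex_unlock(&lock);
			usleep(delay_ms * 1000);
			pthread_mutex_lock(&lock);
		}
		ok = try_alloc();
	} while (!ok && retry <= max_retries);

	return ok;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	alloc_with_retry(3, 50);
	pthread_mutex_unlock(&lock);
	return 0;
}
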
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2405ae3..f78b659 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3630,7 +3630,7 @@
 	 * or disable state so cannot receive any completion of
 	 * other requests.
 	 */
-	BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
 
 	/* clear pending request */
 	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,7 +3664,7 @@
 out:
 
 	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
-	if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+	if (!(err || cmdq_req->resp_err)) {
 		mmc_host_clk_release(host);
 		wake_up(&ctx_info->wait);
 		mmc_put_card(host->card);
@@ -4099,6 +4099,13 @@
 			if (ret == -EBUSY || ret == -EAGAIN) {
 				mmc_blk_cmdq_requeue_rw_rq(mq, req);
 				mmc_put_card(host->card);
+			} else if (ret == -ENOMEM) {
+				/*
+				 * Elaborate error handling is not needed for
+				 * system errors. Let the higher layer decide
+				 * on the next steps.
+				 */
+				goto out;
 			}
 		}
 	}
@@ -4694,9 +4701,7 @@
 
 	dev_set_drvdata(&card->dev, md);
 
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	mmc_set_bus_resume_policy(card->host, 1);
-#endif
 
 	if (mmc_add_disk(md))
 		goto out;
@@ -4741,9 +4746,7 @@
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	mmc_set_bus_resume_policy(card->host, 0);
-#endif
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index aa8373d..66165d9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -314,6 +314,8 @@
 						host->max_req_size / 512));
 	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 	blk_queue_max_segments(mq->queue, host->max_segs);
+	if (host->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
 }
 
 /**
@@ -483,6 +485,9 @@
 success:
 	sema_init(&mq->thread_sem, 1);
 
+	if (host->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
+
 	/* hook for pm qos legacy init */
 	if (card->host->ops->init)
 		card->host->ops->init(card->host);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8700e72..69465f8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1972,10 +1972,9 @@
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	if (mmc_bus_needs_resume(host))
 		mmc_resume_bus(host);
-#endif
+
 	__mmc_start_req(host, mrq);
 
 	if (!mrq->cap_cmd_during_tfr)
@@ -2418,10 +2417,9 @@
 {
 	pm_runtime_get_sync(&card->dev);
 	mmc_claim_host(card->host);
-#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+
 	if (mmc_bus_needs_resume(card->host))
 		mmc_resume_bus(card->host);
-#endif
 }
 EXPORT_SYMBOL(mmc_get_card);
 
@@ -3380,6 +3378,7 @@
 {
 	unsigned long flags;
 	int err = 0;
+	int card_present = true;
 
 	if (!mmc_bus_needs_resume(host))
 		return -EINVAL;
@@ -3390,10 +3389,28 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	mmc_bus_get(host);
-	if (host->bus_ops && !host->bus_dead && host->card) {
+	if (host->ops->get_cd)
+		card_present = host->ops->get_cd(host);
+
+	if (host->bus_ops && !host->bus_dead && host->card && card_present) {
 		mmc_power_up(host, host->card->ocr);
 		BUG_ON(!host->bus_ops->resume);
-		host->bus_ops->resume(host);
+		err = host->bus_ops->resume(host);
+		if (err) {
+			pr_err("%s: %s: resume failed: %d\n",
+				       mmc_hostname(host), __func__, err);
+			/*
+			 * If we have cd-gpio based detection mechanism and
+			 * deferred resume is supported, we will not detect
+			 * card removal event when system is suspended. So if
+			 * resume fails after a system suspend/resume,
+			 * schedule the work to detect card presence.
+			 */
+			if (mmc_card_is_removable(host) &&
+					!(host->caps & MMC_CAP_NEEDS_POLL)) {
+				mmc_detect_change(host, 0);
+			}
+		}
 		if (mmc_card_cmdq(host->card)) {
 			err = mmc_cmdq_halt(host, false);
 			if (err)
@@ -4703,7 +4720,7 @@
 	struct mmc_host *host = container_of(
 		notify_block, struct mmc_host, pm_notify);
 	unsigned long flags;
-	int err = 0;
+	int err = 0, present = 0;
 
 	switch (mode) {
 	case PM_RESTORE_PREPARE:
@@ -4750,8 +4767,12 @@
 
 		spin_lock_irqsave(&host->lock, flags);
 		host->rescan_disable = 0;
+		if (host->ops->get_cd)
+			present = host->ops->get_cd(host);
+
 		if (mmc_bus_manual_resume(host) &&
-				!host->ignore_bus_resume_flags) {
+				!host->ignore_bus_resume_flags &&
+				present) {
 			spin_unlock_irqrestore(&host->lock, flags);
 			break;
 		}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 965d1f0..71a1cfb 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1169,6 +1169,9 @@
  */
 static int mmc_sd_alive(struct mmc_host *host)
 {
+	if (host->ops->get_cd && !host->ops->get_cd(host))
+		return -ENOMEDIUM;
+
 	return mmc_send_status(host->card, NULL);
 }
 
@@ -1197,6 +1200,16 @@
 		return;
 	}
 
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+
+	if (host->ops->get_cd && !host->ops->get_cd(host)) {
+		err = -ENOMEDIUM;
+		mmc_card_set_removed(host->card);
+		mmc_card_clr_suspended(host->card);
+		goto out;
+	}
+
 	/*
 	 * Just check if our card has been removed.
 	 */
@@ -1219,6 +1232,7 @@
 	err = _mmc_detect_card_removed(host);
 #endif
 
+out:
 	mmc_put_card(host->card);
 
 	if (err) {
@@ -1303,6 +1317,11 @@
 	if (!mmc_card_suspended(host->card))
 		goto out;
 
+	if (host->ops->get_cd && !host->ops->get_cd(host)) {
+		mmc_card_clr_suspended(host->card);
+		goto out;
+	}
+
 	mmc_power_up(host, host->card->ocr);
 #ifdef CONFIG_MMC_PARANOID_SD_INIT
 	retries = 5;
@@ -1329,6 +1348,8 @@
 			mmc_hostname(host), __func__, err);
 		mmc_card_set_removed(host->card);
 		mmc_detect_change(host, msecs_to_jiffies(200));
+	} else if (err) {
+		goto out;
 	}
 	mmc_card_clr_suspended(host->card);
 
@@ -1398,6 +1419,9 @@
 
 static int mmc_sd_reset(struct mmc_host *host)
 {
+	if (host->ops->get_cd && !host->ops->get_cd(host))
+		return -ENOMEDIUM;
+
 	mmc_power_cycle(host, host->card->ocr);
 	return mmc_sd_init_card(host, host->card->ocr, host->card);
 }
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 91ad946..090f64da 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -35,6 +35,10 @@
 {
 	/* Schedule a card detection after a debounce timeout */
 	struct mmc_host *host = dev_id;
+	int present = host->ops->get_cd(host);
+
+	pr_debug("%s: cd gpio irq, gpio state %d (CARD_%s)\n",
+		mmc_hostname(host), present, present ? "INSERT" : "REMOVAL");
 
 	host->trigger_card_event = true;
 	mmc_detect_change(host, msecs_to_jiffies(200));
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 50dd6bd..524c8e0 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -33,6 +33,8 @@
 	const struct sdhci_iproc_data *data;
 	u32 shadow_cmd;
 	u32 shadow_blk;
+	bool is_cmd_shadowed;
+	bool is_blk_shadowed;
 };
 
 #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@
 
 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
 {
-	u32 val = sdhci_iproc_readl(host, (reg & ~3));
-	u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+	u32 val;
+	u16 word;
+
+	if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
+		/* Get the saved transfer mode */
+		val = iproc_host->shadow_cmd;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
+		/* Get the saved block info */
+		val = iproc_host->shadow_blk;
+	} else {
+		val = sdhci_iproc_readl(host, (reg & ~3));
+	}
+	word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
 	return word;
 }
 
@@ -105,13 +121,15 @@
 
 	if (reg == SDHCI_COMMAND) {
 		/* Write the block now as we are issuing a command */
-		if (iproc_host->shadow_blk != 0) {
+		if (iproc_host->is_blk_shadowed) {
 			sdhci_iproc_writel(host, iproc_host->shadow_blk,
 				SDHCI_BLOCK_SIZE);
-			iproc_host->shadow_blk = 0;
+			iproc_host->is_blk_shadowed = false;
 		}
 		oldval = iproc_host->shadow_cmd;
-	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+		iproc_host->is_cmd_shadowed = false;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
 		/* Block size and count are stored in shadow reg */
 		oldval = iproc_host->shadow_blk;
 	} else {
@@ -123,9 +141,11 @@
 	if (reg == SDHCI_TRANSFER_MODE) {
 		/* Save the transfer mode until the command is issued */
 		iproc_host->shadow_cmd = newval;
+		iproc_host->is_cmd_shadowed = true;
 	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
 		/* Save the block info until the command is issued */
 		iproc_host->shadow_blk = newval;
+		iproc_host->is_blk_shadowed = true;
 	} else {
 		/* Command or other regular 32-bit write */
 		sdhci_iproc_writel(host, newval, reg & ~3);
@@ -176,7 +196,6 @@
 	.caps1 = SDHCI_DRIVER_TYPE_C |
 		 SDHCI_DRIVER_TYPE_D |
 		 SDHCI_SUPPORT_DDR50,
-	.mmc_caps = MMC_CAP_1_8V_DDR,
 };
 
 static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
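
The iProc controller apparently only allows 32-bit register accesses, which is why 16-bit SDHCI registers are read-modify-written inside their aligned 32-bit word and why the transfer mode and block size/count are kept in shadow copies until the command is actually issued. A small standalone sketch of the word-merge arithmetic (the macro mirrors REG_OFFSET_IN_BITS above; the register offset is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Bit offset of a register inside its aligned 32-bit word, same idea as
 * REG_OFFSET_IN_BITS in the patch above. */
#define REG_OFFSET_IN_BITS(reg) (((reg) << 3) & 0x18)

static uint32_t merge_word16(uint32_t oldval, unsigned int reg, uint16_t val)
{
	unsigned int shift = REG_OFFSET_IN_BITS(reg);
	uint32_t mask = 0xffffu << shift;

	return (oldval & ~mask) | ((uint32_t)val << shift);
}

int main(void)
{
	/* Hypothetical: a 16-bit register at byte offset 0x06 sits in the
	 * upper half of the aligned 32-bit word at offset 0x04. */
	uint32_t word = 0x11112222u;
	uint32_t merged = merge_word16(word, 0x06, 0xabcd);

	printf("0x%08x -> 0x%08x\n", word, merged);	/* 0xabcd2222 */
	return 0;
}
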
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index d91eb67..7d24213 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -5075,6 +5075,7 @@
 			goto vreg_deinit;
 		}
 		host->is_crypto_en = true;
+		msm_host->mmc->inlinecrypt_support = true;
 		/* Packed commands cannot be encrypted/decrypted using ICE */
 		msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
 				MMC_CAP2_PACKED_WR_CONTROL);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5e1b68c..e1b603c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
 #define I82802AB	0x00ad
 #define I82802AC	0x00ac
 #define PF38F4476	0x881c
+#define M28F00AP30	0x8963
 /* STMicroelectronics chips */
 #define M50LPW080       0x002F
 #define M50FLW080A	0x0080
@@ -375,6 +376,17 @@
 		extp->MinorVersion = '1';
 }
 
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+	/*
+	 * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
+	 * Erase Suspend for their small Erase Blocks (0x8000)
+	 */
+	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+		return 1;
+	return 0;
+}
+
 static inline struct cfi_pri_intelext *
 read_pri_intelext(struct map_info *map, __u16 adr)
 {
@@ -831,21 +843,30 @@
 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
 			goto sleep;
 
+		/* Do not allow suspend if reading/writing the EB being erased */
+		if ((adr & chip->in_progress_block_mask) ==
+		    chip->in_progress_block_addr)
+			goto sleep;
+
+		/* Do not suspend small EBs on buggy Micron chips */
+		if (cfi_is_micron_28F00AP30(cfi, chip) &&
+		    (chip->in_progress_block_mask == ~(0x8000-1)))
+			goto sleep;
 
 		/* Erase suspend */
-		map_write(map, CMD(0xB0), adr);
+		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
 
 		/* If the flash has finished erasing, then 'erase suspend'
 		 * appears to make some (28F320) flash devices switch to
 		 * 'read' mode.  Make sure that we switch to 'read status'
 		 * mode so we get the right data. --rmk
 		 */
-		map_write(map, CMD(0x70), adr);
+		map_write(map, CMD(0x70), chip->in_progress_block_addr);
 		chip->oldstate = FL_ERASING;
 		chip->state = FL_ERASE_SUSPENDING;
 		chip->erase_suspended = 1;
 		for (;;) {
-			status = map_read(map, adr);
+			status = map_read(map, chip->in_progress_block_addr);
 			if (map_word_andequal(map, status, status_OK, status_OK))
 			        break;
 
@@ -1041,8 +1062,8 @@
 		   sending the 0x70 (Read Status) command to an erasing
 		   chip and expecting it to be ignored, that's what we
 		   do. */
-		map_write(map, CMD(0xd0), adr);
-		map_write(map, CMD(0x70), adr);
+		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+		map_write(map, CMD(0x70), chip->in_progress_block_addr);
 		chip->oldstate = FL_READY;
 		chip->state = FL_ERASING;
 		break;
@@ -1933,6 +1954,8 @@
 	map_write(map, CMD(0xD0), adr);
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
+	chip->in_progress_block_addr = adr;
+	chip->in_progress_block_mask = ~(len - 1);
 
 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
 				   adr, len,
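
The new suspend check depends on in_progress_block_mask = ~(len - 1): for a power-of-two erase block, (adr & mask) == in_progress_block_addr holds exactly when adr falls inside the block currently being erased, and mask == ~(0x8000 - 1) singles out the 32 KiB blocks that the buggy Micron part cannot suspend. A small standalone sketch of that arithmetic with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block_addr = 0x00128000;	/* hypothetical EB being erased */
	uint32_t len = 0x8000;			/* 32 KiB erase block */
	uint32_t mask = ~(len - 1);		/* 0xffff8000 */
	uint32_t inside = 0x0012a10c;		/* lands in the erasing block */
	uint32_t outside = 0x00130000;		/* lands in the next block */

	printf("inside:  %d\n", (inside & mask) == block_addr);	/* 1 */
	printf("outside: %d\n", (outside & mask) == block_addr);	/* 0 */
	return 0;
}
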
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9dca881..107c05b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -812,9 +812,10 @@
 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
 			goto sleep;
 
-		/* We could check to see if we're trying to access the sector
-		 * that is currently being erased. However, no user will try
-		 * anything like that so we just wait for the timeout. */
+		/* Do not allow suspend if reading/writing the EB being erased */
+		if ((adr & chip->in_progress_block_mask) ==
+		    chip->in_progress_block_addr)
+			goto sleep;
 
 		/* Erase suspend */
 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2263,6 +2264,7 @@
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 	chip->in_progress_block_addr = adr;
+	chip->in_progress_block_mask = ~(map->size - 1);
 
 	INVALIDATE_CACHE_UDELAY(map, chip,
 				adr, map->size,
@@ -2352,6 +2354,7 @@
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 	chip->in_progress_block_addr = adr;
+	chip->in_progress_block_mask = ~(len - 1);
 
 	INVALIDATE_CACHE_UDELAY(map, chip,
 				adr, len,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 551f0f8..91d8a48 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -450,7 +450,7 @@
 {
 	int i;
 
-	if (!client_info->slave)
+	if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
 		return;
 
 	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -944,6 +944,10 @@
 	skb->priority = TC_PRIO_CONTROL;
 	skb->dev = slave->dev;
 
+	netdev_dbg(slave->bond->dev,
+		   "Send learning packet: dev %s mac %pM vlan %d\n",
+		   slave->dev->name, mac_addr, vid);
+
 	if (vid)
 		__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
 
@@ -966,14 +970,13 @@
 	 */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
-			if (strict_match &&
-			    ether_addr_equal_64bits(mac_addr,
-						    upper->dev_addr)) {
+		if (is_vlan_dev(upper) &&
+		    bond->nest_level == vlan_get_encap_level(upper) - 1) {
+			if (upper->addr_assign_type == NET_ADDR_STOLEN) {
 				alb_send_lp_vid(slave, mac_addr,
 						vlan_dev_vlan_proto(upper),
 						vlan_dev_vlan_id(upper));
-			} else if (!strict_match) {
+			} else {
 				alb_send_lp_vid(slave, upper->dev_addr,
 						vlan_dev_vlan_proto(upper),
 						vlan_dev_vlan_id(upper));
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 513457a..1a139d0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1654,8 +1654,7 @@
 	} /* switch(bond_mode) */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	slave_dev->npinfo = bond->dev->npinfo;
-	if (slave_dev->npinfo) {
+	if (bond->dev->npinfo) {
 		if (slave_enable_netpoll(new_slave)) {
 			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
 			res = -EBUSY;
@@ -1733,6 +1732,8 @@
 	if (bond_mode_uses_xmit_hash(bond))
 		bond_update_slave_arr(bond, NULL);
 
+	bond->nest_level = dev_get_nest_level(bond_dev);
+
 	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
 		    slave_dev->name,
 		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index c9d61a6..3a75352 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1179,7 +1179,7 @@
 
 	skb = alloc_can_skb(priv->netdev, &cf);
 	if (!skb) {
-		stats->tx_dropped++;
+		stats->rx_dropped++;
 		return;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 49f4cafe..86a32fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -529,7 +529,8 @@
 	int i;
 
 	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
-		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
+		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
 
 		slot = &ring->slots[i];
 		dev_kfree_skb(slot->skb);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa993b..ca57eb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3401,6 +3401,9 @@
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	struct hwrm_vnic_tpa_cfg_input req = {0};
 
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return 0;
+
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
 
 	if (tpa_flags) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 795a133..4ffbe85 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,14 +8720,15 @@
 	tg3_mem_rx_release(tp);
 	tg3_mem_tx_release(tp);
 
-	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
-	tg3_full_lock(tp, 0);
+	/* tp->hw_stats can be referenced safely:
+	 *     1. under rtnl_lock
+	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+	 */
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
-	tg3_full_unlock(tp);
 }
 
 /*
@@ -14161,7 +14162,7 @@
 	struct tg3 *tp = netdev_priv(dev);
 
 	spin_lock_bh(&tp->lock);
-	if (!tp->hw_stats) {
+	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
 		*stats = tp->net_stats_prev;
 		spin_unlock_bh(&tp->lock);
 		return stats;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3ec32d7..c395b21 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -836,8 +836,6 @@
 
 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
 			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
-	if (err)
-		t4_free_sge_resources(adap);
 	return err;
 }
 
@@ -4940,6 +4938,13 @@
 	if (err)
 		goto out_free_dev;
 
+	err = setup_fw_sge_queues(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev,
+			"FW sge queue allocation failed, err %d", err);
+		goto out_free_dev;
+	}
+
 	/*
 	 * The card is now ready to go.  If any errors occur during device
 	 * registration we do not fail the whole card but rather proceed only
@@ -4983,7 +4988,6 @@
 	}
 
 	print_adapter_info(adapter);
-	setup_fw_sge_queues(adapter);
 	return 0;
 
 sriov:
@@ -5035,6 +5039,7 @@
 #endif
 
  out_free_dev:
+	t4_free_sge_resources(adapter);
 	free_some_resources(adapter);
 	if (adapter->flags & USING_MSIX)
 		free_msix_info(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 2471ff4..23d6c44 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -342,6 +342,7 @@
 {
 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
 
+	adap->sge.uld_rxq_info[uld_type] = NULL;
 	kfree(rxq_info->rspq_id);
 	kfree(rxq_info->uldrxq);
 	kfree(rxq_info);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 48f82ab..dda63b2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1726,6 +1726,8 @@
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
+		/* enable rq before updating rq desc */
+		vnic_rq_enable(&enic->rq[i]);
 		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1737,8 +1739,6 @@
 
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_enable(&enic->wq[i]);
-	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_enable(&enic->rq[i]);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_add_station_addr(enic);
@@ -1765,8 +1765,12 @@
 	return 0;
 
 err_out_free_rq:
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	}
 	enic_dev_notify_unset(enic);
 err_out_free_intr:
 	enic_unset_affinity_hint(enic);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index c88918c..641b916 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1036,7 +1036,7 @@
 	set_bucket(dtsec->regs, bucket, true);
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e3b41ba..60bd1b3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2935,7 +2935,7 @@
 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 			     struct sk_buff *skb, bool first)
 {
-	unsigned int size = lstatus & BD_LENGTH_MASK;
+	int size = lstatus & BD_LENGTH_MASK;
 	struct page *page = rxb->page;
 	bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
 
@@ -2950,11 +2950,16 @@
 		if (last)
 			size -= skb->len;
 
-		/* in case the last fragment consisted only of the FCS */
+		/* Add the last fragment if it contains something other than
+		 * the FCS, otherwise drop it and trim off any part of the FCS
+		 * that was already received.
+		 */
 		if (size > 0)
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 					rxb->page_offset + RXBUF_ALIGNMENT,
 					size, GFAR_RXB_TRUESIZE);
+		else if (size < 0)
+			pskb_trim(skb, skb->len + size);
 	}
 
 	/* try reuse page */
@@ -3070,9 +3075,6 @@
 	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
-	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, ndev);
-
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
@@ -3143,13 +3145,15 @@
 			continue;
 		}
 
+		gfar_process_frame(ndev, skb);
+
 		/* Increment the number of packets */
 		total_pkts++;
 		total_bytes += skb->len;
 
 		skb_record_rx_queue(skb, rx_queue->qindex);
 
-		gfar_process_frame(ndev, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Send the packet up the stack */
 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 49094c9..897a87a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -994,6 +994,7 @@
 			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
 			/* free the entry */
 			next->rx_comp.first = 0;
+			dev_kfree_skb_any(rx_buff->skb);
 			remove_buff_from_pool(adapter, rx_buff);
 			break;
 		}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 8a48656..7ddac95 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1600,7 +1600,7 @@
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return -E1000_ERR_CONFIG;
+		return 1;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f457c57..db73564 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -450,7 +450,7 @@
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return -E1000_ERR_CONFIG;
+		return 1;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 825ec8f..9c95222 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2331,8 +2331,8 @@
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
-					GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 0562938..ea5ea65 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -803,8 +803,12 @@
 	if (vid >= VLAN_N_VID)
 		return -EINVAL;
 
-	/* Verify we have permission to add VLANs */
-	if (hw->mac.vlan_override)
+	/* Verify that we have permission to add VLANs. If this is a request
+	 * to remove a VLAN, we still want to allow the user to remove the
+	 * VLAN device. In that case, we need to clear the bit in the
+	 * active_vlans bitmask.
+	 */
+	if (set && hw->mac.vlan_override)
 		return -EACCES;
 
 	/* update active_vlans bitmask */
@@ -823,6 +827,12 @@
 			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
 	}
 
+	/* If our VLAN has been overridden, there is no reason to send VLAN
+	 * removal requests as they will be silently ignored.
+	 */
+	if (hw->mac.vlan_override)
+		return 0;
+
 	/* Do not remove default VLAN ID related entries from VLAN and MAC
 	 * tables
 	 */
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fa46326..17b8178 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1080,6 +1080,7 @@
 	}
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 
+	q_map = 0;
 	/* Enable all initialized RXQs. */
 	for (queue = 0; queue < rxq_number; queue++) {
 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 24977cc..9a4c4f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -970,6 +970,22 @@
 	if (!coal->tx_max_coalesced_frames_irq)
 		return -EINVAL;
 
+	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+			    __func__, MLX4_EN_MAX_COAL_TIME);
+		return -ERANGE;
+	}
+
+	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+			    __func__, MLX4_EN_MAX_COAL_PKTS);
+		return -ERANGE;
+	}
+
 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
 			   MLX4_EN_AUTO_CONF) ?
 				MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5411ca4..cb7c3ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2983,6 +2983,7 @@
 		mlx4_err(dev, "Failed to create file for port %d\n", port);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -3004,9 +3005,10 @@
 				   &info->port_attr);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
-	return err;
+	return 0;
 }
 
 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 18f221d..247d340 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -141,6 +141,9 @@
 #define MLX4_EN_TX_COAL_PKTS	16
 #define MLX4_EN_TX_COAL_TIME	0x10
 
+#define MLX4_EN_MAX_COAL_PKTS	U16_MAX
+#define MLX4_EN_MAX_COAL_TIME	U16_MAX
+
 #define MLX4_EN_RX_RATE_LOW		400000
 #define MLX4_EN_RX_COAL_TIME_LOW	0
 #define MLX4_EN_RX_RATE_HIGH		450000
@@ -543,8 +546,8 @@
 	u16 rx_usecs_low;
 	u32 pkt_rate_high;
 	u16 rx_usecs_high;
-	u16 sample_interval;
-	u16 adaptive_rx_coal;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
 	u32 msg_enable;
 	u32 loopback_ok;
 	u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4c3f1cb..6631fb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1765,7 +1765,7 @@
 
 	cmd->checksum_disabled = 1;
 	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
-	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
 
 	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
 	if (cmd->cmdif_rev > CMD_IF_REV) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a8966e6..5d6eab1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1924,26 +1924,35 @@
 	memset(vf_stats, 0, sizeof(*vf_stats));
 	vf_stats->rx_packets =
 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
+		MLX5_GET_CTR(out, received_ib_unicast.packets) +
 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets) +
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
 	vf_stats->rx_bytes =
 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
+		MLX5_GET_CTR(out, received_ib_unicast.octets) +
 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
+		MLX5_GET_CTR(out, received_ib_multicast.octets) +
 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
 
 	vf_stats->tx_packets =
 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
 
 	vf_stats->tx_bytes =
 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
 
 	vf_stats->multicast =
-		MLX5_GET_CTR(out, received_eth_multicast.packets);
+		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets);
 
 	vf_stats->broadcast =
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 331a6ca..5f3402b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -153,6 +153,7 @@
 static void del_flow_table(struct fs_node *node);
 static void del_flow_group(struct fs_node *node);
 static void del_fte(struct fs_node *node);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
 
 static void tree_init_node(struct fs_node *node,
 			   unsigned int refcount,
@@ -1690,24 +1691,28 @@
 
 static int init_root_ns(struct mlx5_flow_steering *steering)
 {
+	int err;
 
 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
 	if (!steering->root_ns)
-		goto cleanup;
+		return -ENOMEM;
 
-	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
-		goto cleanup;
+	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+	if (err)
+		goto out_err;
 
 	set_prio_attrs(steering->root_ns);
 
-	if (create_anchor_flow_table(steering))
-		goto cleanup;
+	err = create_anchor_flow_table(steering);
+	if (err)
+		goto out_err;
 
 	return 0;
 
-cleanup:
-	mlx5_cleanup_fs(steering->dev);
-	return -ENOMEM;
+out_err:
+	cleanup_root_ns(steering->root_ns);
+	steering->root_ns = NULL;
+	return err;
 }
 
 static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 4ca82bd..eee6e59 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -854,6 +854,8 @@
 
 	netdev_tx_sent_queue(nd_q, txbuf->real_len);
 
+	skb_tx_timestamp(skb);
+
 	tx_ring->wr_p += nr_frags + 1;
 	if (nfp_net_tx_ring_should_stop(tx_ring))
 		nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -866,8 +868,6 @@
 		tx_ring->wr_ptr_add = 0;
 	}
 
-	skb_tx_timestamp(skb);
-
 	return NETDEV_TX_OK;
 
 err_unmap:
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index f683bfb..9d223ff 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1250,9 +1250,9 @@
 	while (tx_q->tpd.consume_idx != hw_consume_idx) {
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
 		if (tpbuf->dma_addr) {
-			dma_unmap_single(adpt->netdev->dev.parent,
-					 tpbuf->dma_addr, tpbuf->length,
-					 DMA_TO_DEVICE);
+			dma_unmap_page(adpt->netdev->dev.parent,
+				       tpbuf->dma_addr, tpbuf->length,
+				       DMA_TO_DEVICE);
 			tpbuf->dma_addr = 0;
 		}
 
@@ -1409,9 +1409,11 @@
 
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = mapped_len;
-		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-						 skb->data, tpbuf->length,
-						 DMA_TO_DEVICE);
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data),
+					       offset_in_page(skb->data),
+					       tpbuf->length,
+					       DMA_TO_DEVICE);
 		ret = dma_mapping_error(adpt->netdev->dev.parent,
 					tpbuf->dma_addr);
 		if (ret)
@@ -1427,9 +1429,12 @@
 	if (mapped_len < len) {
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = len - mapped_len;
-		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-						 skb->data + mapped_len,
-						 tpbuf->length, DMA_TO_DEVICE);
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data +
+							    mapped_len),
+					       offset_in_page(skb->data +
+							      mapped_len),
+					       tpbuf->length, DMA_TO_DEVICE);
 		ret = dma_mapping_error(adpt->netdev->dev.parent,
 					tpbuf->dma_addr);
 		if (ret)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da4c2d8..1420dfb 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2233,7 +2233,7 @@
 	struct rtl8139_private *tp = netdev_priv(dev);
 	const int irq = tp->pci_dev->irq;
 
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	rtl8139_interrupt(irq, dev);
 	enable_irq(irq);
 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index dbb6364..59b932d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4861,6 +4861,9 @@
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
 	rtl_generic_op(tp, tp->pll_power_ops.up);
+
+	/* give MAC/PHY some time to resume */
+	msleep(20);
 }
 
 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8b0016a..734caa7 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2330,14 +2330,14 @@
 	pdata = netdev_priv(dev);
 	BUG_ON(!pdata);
 	BUG_ON(!pdata->ioaddr);
-	WARN_ON(dev->phydev);
 
 	SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
+	unregister_netdev(dev);
+
 	mdiobus_unregister(pdata->mii_bus);
 	mdiobus_free(pdata->mii_bus);
 
-	unregister_netdev(dev);
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "smsc911x-memory");
 	if (!res)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index ffaed1f..f356a44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -118,7 +118,7 @@
 	snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
 	init.name = clk_name;
 	init.ops = &clk_mux_ops;
-	init.flags = 0;
+	init.flags = CLK_SET_RATE_PARENT;
 	init.parent_names = mux_parent_names;
 	init.num_parents = MUX_CLK_NUM_PARENTS;
 
@@ -146,7 +146,9 @@
 	dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
 	dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
 	dwmac->m250_div.hw.init = &init;
-	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+				CLK_DIVIDER_ALLOW_ZERO |
+				CLK_DIVIDER_ROUND_CLOSEST;
 
 	dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
 	if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c212d1d..b3bc128 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1343,6 +1343,11 @@
 		if (unlikely(status & tx_dma_own))
 			break;
 
+		/* Make sure descriptor fields are read after reading
+		 * the own bit.
+		 */
+		dma_rmb();
+
 		/* Just consider the last segment and ...*/
 		if (likely(!(status & tx_not_ls))) {
 			/* ... verify the status error condition */
@@ -2136,8 +2141,15 @@
 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
 
 	/* If context desc is used to change MSS */
-	if (mss_desc)
+	if (mss_desc) {
+		/* Make sure that first descriptor has been completely
+		 * written, including its own bit. This is because MSS is
+		 * actually before first descriptor, so we need to make
+		 * sure that MSS's own bit is the last thing written.
+		 */
+		dma_wmb();
 		priv->hw->desc->set_tx_owner(mss_desc);
+	}
 
 	/* The own bit must be the latest setting done when prepare the
 	 * descriptor and then barrier is needed to make sure that
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a2371aa..e45e2f1 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@
 
 		len = (val & RCR_ENTRY_L2_LEN) >>
 			RCR_ENTRY_L2_LEN_SHIFT;
-		len -= ETH_FCS_LEN;
+		append_size = len + ETH_HLEN + ETH_FCS_LEN;
 
 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@
 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
 
 		off = addr & ~PAGE_MASK;
-		append_size = rcr_size;
 		if (num_rcr == 1) {
 			int ptype;
 
@@ -3465,7 +3464,7 @@
 			else
 				skb_checksum_none_assert(skb);
 		} else if (!(val & RCR_ENTRY_MULTI))
-			append_size = len - skb->len;
+			append_size = append_size - skb->len;
 
 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a2f9b47..e36c700 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -198,7 +198,7 @@
 	dev->ethtool_ops = &vnet_ethtool_ops;
 	dev->watchdog_timeo = VNET_TX_TIMEOUT;
 
-	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
 			   NETIF_F_HW_CSUM | NETIF_F_SG;
 	dev->features = dev->hw_features;
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 552de9c..d7cb205 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@
 
 #define RX_PRIORITY_MAPPING	0x76543210
 #define TX_PRIORITY_MAPPING	0x33221100
-#define CPDMA_TX_PRIORITY_MAP	0x01234567
+#define CPDMA_TX_PRIORITY_MAP	0x76543210
 
 #define CPSW_VLAN_AWARE		BIT(1)
 #define CPSW_ALE_VLAN_AWARE	1
@@ -1141,6 +1141,8 @@
 	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 			   HOST_PORT_NUM, ALE_VLAN |
 			   ALE_SECURE, slave->port_vlan);
+	cpsw_ale_control_set(cpsw->ale, slave_port,
+			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index e8ad4d0..6237236 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1384,7 +1384,7 @@
 	/* the macvlan port may be freed by macvlan_uninit when fail to register.
 	 * so we destroy the macvlan port only when it's valid.
 	 */
-	if (create && macvlan_port_get_rtnl(dev))
+	if (create && macvlan_port_get_rtnl(lowerdev))
 		macvlan_port_destroy(port->dev);
 	return err;
 }
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b88f7d6..482ea40 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1205,6 +1205,23 @@
 	kfree(dp83640);
 }
 
+static int dp83640_soft_reset(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = genphy_soft_reset(phydev);
+	if (ret < 0)
+		return ret;
+
+	/* From DP83640 datasheet: "Software driver code must wait 3 us
+	 * following a software reset before allowing further serial MII
+	 * operations with the DP83640."
+	 */
+	udelay(10);		/* Taking udelay inaccuracy into account */
+
+	return 0;
+}
+
 static int dp83640_config_init(struct phy_device *phydev)
 {
 	struct dp83640_private *dp83640 = phydev->priv;
@@ -1498,6 +1515,7 @@
 	.flags		= PHY_HAS_INTERRUPT,
 	.probe		= dp83640_probe,
 	.remove		= dp83640_remove,
+	.soft_reset	= dp83640_soft_reset,
 	.config_init	= dp83640_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index dc36c2e..fa2c7bd 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@
 	lock_sock(sk);
 
 	error = -EINVAL;
+
+	if (sockaddr_len != sizeof(struct sockaddr_pppox))
+		goto end;
+
 	if (sp->sa_protocol != PX_PROTO_OE)
 		goto end;
 
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8673ef3..3696368 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@
 	}
 }
 
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+					const struct team_option_inst *needle)
+{
+	struct team_option_inst *opt_inst;
+
+	list_for_each_entry(opt_inst, opts, tmp_list)
+		if (opt_inst == needle)
+			return true;
+	return false;
+}
+
 static int __team_options_register(struct team *team,
 				   const struct team_option *option,
 				   size_t option_count)
@@ -1067,14 +1078,11 @@
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
 {
 	struct netpoll *np;
 	int err;
 
-	if (!team->dev->npinfo)
-		return 0;
-
 	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
@@ -1088,6 +1096,14 @@
 	return err;
 }
 
+static int team_port_enable_netpoll(struct team_port *port)
+{
+	if (!port->team->dev->npinfo)
+		return 0;
+
+	return __team_port_enable_netpoll(port);
+}
+
 static void team_port_disable_netpoll(struct team_port *port)
 {
 	struct netpoll *np = port->np;
@@ -1102,7 +1118,7 @@
 	kfree(np);
 }
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
 {
 	return 0;
 }
@@ -1210,7 +1226,7 @@
 		goto err_vids_add;
 	}
 
-	err = team_port_enable_netpoll(team, port);
+	err = team_port_enable_netpoll(port);
 	if (err) {
 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
 			   portname);
@@ -1908,7 +1924,7 @@
 
 	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
-		err = team_port_enable_netpoll(team, port);
+		err = __team_port_enable_netpoll(port);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;
@@ -2569,6 +2585,14 @@
 			if (err)
 				goto team_put;
 			opt_inst->changed = true;
+
+			/* dumb/evil user-space can send us duplicate opt,
+			 * keep only the last one
+			 */
+			if (__team_option_inst_tmp_find(&opt_inst_list,
+							opt_inst))
+				continue;
+
 			list_add(&opt_inst->tmp_list, &opt_inst_list);
 		}
 		if (!opt_found) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4fb4686..99424c8 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -530,6 +530,7 @@
 #define REALTEK_VENDOR_ID	0x0bda
 #define SAMSUNG_VENDOR_ID	0x04e8
 #define LENOVO_VENDOR_ID	0x17ef
+#define LINKSYS_VENDOR_ID	0x13b1
 #define NVIDIA_VENDOR_ID	0x0955
 #define HP_VENDOR_ID		0x03f0
 
@@ -719,6 +720,15 @@
 	.driver_info = 0,
 },
 
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+#endif
+
 /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 973e90f..1d56c73 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -803,12 +803,16 @@
 	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
 	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
 	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+	{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */
+	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
+	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
 	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
 	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
@@ -885,6 +889,7 @@
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
+	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
@@ -941,6 +946,7 @@
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
@@ -1038,6 +1044,18 @@
 		id->driver_info = (unsigned long)&qmi_wwan_info;
 	}
 
+	/* There are devices where the same interface number can be
+	 * configured as different functions. We should only bind to
+	 * vendor specific functions when matching on interface number
+	 */
+	if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+	    desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+		dev_dbg(&intf->dev,
+			"Rejecting interface number match for class %02x\n",
+			desc->bInterfaceClass);
+		return -ENODEV;
+	}
+
 	/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
 	if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
 		dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b2d7c7e..d3d89b0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -519,6 +519,7 @@
 #define VENDOR_ID_REALTEK		0x0bda
 #define VENDOR_ID_SAMSUNG		0x04e8
 #define VENDOR_ID_LENOVO		0x17ef
+#define VENDOR_ID_LINKSYS		0x13b1
 #define VENDOR_ID_NVIDIA		0x0955
 
 #define MCU_TYPE_PLA			0x0100
@@ -1692,7 +1693,7 @@
 
 		tx_data += len;
 		agg->skb_len += len;
-		agg->skb_num++;
+		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
 		dev_kfree_skb_any(skb);
 
@@ -4506,6 +4507,7 @@
 	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
 	{}
 };
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 4cb9b11..2cc0f28 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -957,10 +957,11 @@
 	/* it's racing here! */
 
 	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-	if (ret < 0)
+	if (ret < 0) {
 		netdev_warn(dev->net, "Error writing RFE_CTL\n");
-
-	return ret;
+		return ret;
+	}
+	return 0;
 }
 
 static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 472ed6d..7118b82 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1949,8 +1949,8 @@
 
 	/* Assume link up if device can't report link status,
 	   otherwise get link status from config. */
+	netif_carrier_off(dev);
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
-		netif_carrier_off(dev);
 		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index f809eed..c999b10 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -369,6 +369,11 @@
 
 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
 	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+		/* Prevent any &gdesc->tcd field from being (speculatively)
+		 * read before (&gdesc->tcd)->gen is read.
+		 */
+		dma_rmb();
+
 		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
 					       &gdesc->tcd), tq, adapter->pdev,
 					       adapter);
@@ -1099,6 +1104,11 @@
 		gdesc->txd.tci = skb_vlan_tag_get(skb);
 	}
 
+	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
+	 * all other writes to &gdesc->txd.
+	 */
+	dma_wmb();
+
 	/* finally flips the GEN bit of the SOP desc. */
 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
 						  VMXNET3_TXD_GEN);
@@ -1286,6 +1296,12 @@
 			 */
 			break;
 		}
+
+		/* Prevent any rcd field from being (speculatively) read before
+		 * rcd->gen is read.
+		 */
+		dma_rmb();
+
 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
 		       rcd->rqID != rq->dataRingQid);
 		idx = rcd->rxdIdx;
@@ -1515,6 +1531,12 @@
 		ring->next2comp = idx;
 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
 		ring = rq->rx_ring + ring_idx;
+
+		/* Ensure that the writes to rxd->gen bits will be observed
+		 * after all other writes to rxd objects.
+		 */
+		dma_wmb();
+
 		while (num_to_alloc) {
 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
 					  &rxCmdDesc);
@@ -2675,7 +2697,7 @@
 /* ==================== initialization and cleanup routines ============ */
 
 static int
-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
 {
 	int err;
 	unsigned long mmio_start, mmio_len;
@@ -2687,30 +2709,12 @@
 		return err;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			dev_err(&pdev->dev,
-				"pci_set_consistent_dma_mask failed\n");
-			err = -EIO;
-			goto err_set_mask;
-		}
-		*dma64 = true;
-	} else {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
-			dev_err(&pdev->dev,
-				"pci_set_dma_mask failed\n");
-			err = -EIO;
-			goto err_set_mask;
-		}
-		*dma64 = false;
-	}
-
 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
 					   vmxnet3_driver_name);
 	if (err) {
 		dev_err(&pdev->dev,
 			"Failed to request region for adapter: error %d\n", err);
-		goto err_set_mask;
+		goto err_enable_device;
 	}
 
 	pci_set_master(pdev);
@@ -2738,7 +2742,7 @@
 	iounmap(adapter->hw_addr0);
 err_ioremap:
 	pci_release_selected_regions(pdev, (1 << 2) - 1);
-err_set_mask:
+err_enable_device:
 	pci_disable_device(pdev);
 	return err;
 }
@@ -3246,7 +3250,7 @@
 #endif
 	};
 	int err;
-	bool dma64 = false; /* stupid gcc */
+	bool dma64;
 	u32 ver;
 	struct net_device *netdev;
 	struct vmxnet3_adapter *adapter;
@@ -3292,6 +3296,24 @@
 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed\n");
+			err = -EIO;
+			goto err_set_mask;
+		}
+		dma64 = true;
+	} else {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+			dev_err(&pdev->dev,
+				"pci_set_dma_mask failed\n");
+			err = -EIO;
+			goto err_set_mask;
+		}
+		dma64 = false;
+	}
+
 	spin_lock_init(&adapter->cmd_lock);
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
@@ -3299,7 +3321,7 @@
 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
 		dev_err(&pdev->dev, "Failed to map dma\n");
 		err = -EFAULT;
-		goto err_dma_map;
+		goto err_set_mask;
 	}
 	adapter->shared = dma_alloc_coherent(
 				&adapter->pdev->dev,
@@ -3350,7 +3372,7 @@
 	}
 #endif /* VMXNET3_RSS */
 
-	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
+	err = vmxnet3_alloc_pci_resources(adapter);
 	if (err < 0)
 		goto err_alloc_pci;
 
@@ -3492,7 +3514,7 @@
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
-err_dma_map:
+err_set_mask:
 	free_netdev(netdev);
 	return err;
 }
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a497bf3..d68f4f2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5819,9 +5819,8 @@
 				    sta->addr, smps, err);
 	}
 
-	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
-	    changed & IEEE80211_RC_NSS_CHANGED) {
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
 			   sta->addr);
 
 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -6929,10 +6928,20 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k_vif *arvif = (void *)vif->drv_priv;
+	struct ath10k_peer *peer;
 	u32 bw, smps;
 
 	spin_lock_bh(&ar->data_lock);
 
+	peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+	if (!peer) {
+		spin_unlock_bh(&ar->data_lock);
+		ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
+			    sta->addr, arvif->vdev_id);
+		return;
+	}
+
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a35f78b..acef4ec9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1603,6 +1603,10 @@
 	int count = 50;
 	u32 reg, last_val;
 
+	/* Check if chip failed to wake up */
+	if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+		return false;
+
 	if (AR_SREV_9300(ah))
 		return !ath9k_hw_detect_mac_hang(ah);
 
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f7c63cf..f70420f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -21,11 +21,16 @@
 #include "ftm.h"
 
 #define WIL_MAX_ROC_DURATION_MS 5000
+#define CTRY_CHINA "CN"
 
 bool disable_ap_sme;
 module_param(disable_ap_sme, bool, 0444);
 MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
 
+static bool country_specific_board_file;
+module_param(country_specific_board_file, bool, 0444);
+MODULE_PARM_DESC(country_specific_board_file, " switch board file upon regulatory domain change (Default: false)");
+
 static bool ignore_reg_hints = true;
 module_param(ignore_reg_hints, bool, 0444);
 MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
@@ -1984,6 +1989,64 @@
 	return 0;
 }
 
+static int wil_switch_board_file(struct wil6210_priv *wil,
+				 const u8 *new_regdomain)
+{
+	int rc = 0;
+
+	if (!country_specific_board_file)
+		return 0;
+
+	if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) {
+		wil_info(wil, "moving out of China reg domain, use default board file\n");
+		wil->board_file_country[0] = '\0';
+	} else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) {
+		wil_info(wil, "moving into China reg domain, use country specific board file\n");
+		strlcpy(wil->board_file_country, CTRY_CHINA,
+			sizeof(wil->board_file_country));
+	} else {
+		return 0;
+	}
+
+	/* need to switch board file - reset the device */
+
+	mutex_lock(&wil->mutex);
+
+	if (!netif_running(wil_to_ndev(wil)) || wil_is_recovery_blocked(wil))
+		/* new board file will be used in next FW load */
+		goto out;
+
+	__wil_down(wil);
+	rc = __wil_up(wil);
+
+out:
+	mutex_unlock(&wil->mutex);
+	return rc;
+}
+
+static void wil_cfg80211_reg_notify(struct wiphy *wiphy,
+				    struct regulatory_request *request)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	wil_info(wil, "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+		 request->alpha2[0], request->alpha2[1],
+		 request->intersect ? " intersect" : "",
+		 request->processed ? " processed" : "",
+		 request->initiator, request->user_reg_hint_type);
+
+	if (memcmp(wil->regdomain, request->alpha2, 2) == 0)
+		/* reg domain did not change */
+		return;
+
+	rc = wil_switch_board_file(wil, request->alpha2);
+	if (rc)
+		wil_err(wil, "switch board file failed %d\n", rc);
+
+	memcpy(wil->regdomain, request->alpha2, 2);
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2055,6 +2118,8 @@
 	wiphy->mgmt_stypes = wil_mgmt_stypes;
 	wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
 
+	wiphy->reg_notifier = wil_cfg80211_reg_notify;
+
 	wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
 	wiphy->vendor_commands = wil_nl80211_vendor_commands;
 	wiphy->vendor_events = wil_nl80211_vendor_events;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2baa6cf..f37254d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -26,6 +26,7 @@
 
 #define WAIT_FOR_HALP_VOTE_MS 100
 #define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
 
 bool debug_fw; /* = false; */
 module_param(debug_fw, bool, 0444);
@@ -946,6 +947,30 @@
 	le32_to_cpus(&r->head);
 }
 
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+	const char *board_file = WIL_BOARD_FILE_NAME;
+	const char *ext;
+	int prefix_len;
+
+	if (wil->board_file_country[0] == '\0') {
+		strlcpy(buf, board_file, len);
+		return;
+	}
+
+	/* use country specific board file */
+	if (len < strlen(board_file) + 4 /* for _XX and terminating null */)
+		return;
+
+	ext = strrchr(board_file, '.');
+	prefix_len = (ext ? ext - board_file : strlen(board_file));
+	snprintf(buf, len, "%.*s_%.2s",
+		 prefix_len, board_file, wil->board_file_country);
+	if (ext)
+		strlcat(buf, ext, len);
+}
+
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
 	struct net_device *ndev = wil_to_ndev(wil);
@@ -1260,8 +1285,12 @@
 
 	wil_set_oob_mode(wil, oob_mode);
 	if (load_fw) {
+		char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+		board_file[0] = '\0';
+		wil_get_board_file(wil, board_file, sizeof(board_file));
 		wil_info(wil, "Use firmware <%s> + board <%s>\n",
-			 wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+			 wil->wil_fw_name, board_file);
 
 		if (!no_flash)
 			wil_bl_prepare_halt(wil);
@@ -1273,11 +1302,9 @@
 		if (rc)
 			goto out;
 		if (wil->brd_file_addr)
-			rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+			rc = wil_request_board(wil, board_file);
 		else
-			rc = wil_request_firmware(wil,
-						  WIL_BOARD_FILE_NAME,
-						  true);
+			rc = wil_request_firmware(wil, board_file, true);
 		if (rc)
 			goto out;
 
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index f4476ee..2b71deb 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -683,6 +683,7 @@
 	const char *hw_name;
 	const char *wil_fw_name;
 	char *board_file;
+	char board_file_country[3]; /* alpha2 */
 	u32 brd_file_addr;
 	u32 brd_file_max_size;
 	DECLARE_BITMAP(hw_capa, hw_capa_last);
@@ -796,6 +797,8 @@
 	} snr_thresh;
 
 	int fw_calib_result;
+	/* current reg domain configured in kernel */
+	char regdomain[3]; /* alpha2 */
 
 #ifdef CONFIG_PM
 	struct notifier_block pm_notify;
@@ -873,6 +876,8 @@
 	wil_w(wil, reg, wil_r(wil, reg) & ~val);
 }
 
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,	\
 			  groupsize, buf, len, ascii)		\
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 54354a3..8352465 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2776,7 +2776,6 @@
 				   struct brcmf_bss_info_le *bi)
 {
 	struct wiphy *wiphy = cfg_to_wiphy(cfg);
-	struct ieee80211_channel *notify_channel;
 	struct cfg80211_bss *bss;
 	struct ieee80211_supported_band *band;
 	struct brcmu_chan ch;
@@ -2786,7 +2785,7 @@
 	u16 notify_interval;
 	u8 *notify_ie;
 	size_t notify_ielen;
-	s32 notify_signal;
+	struct cfg80211_inform_bss bss_data = {};
 
 	if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
 		brcmf_err("Bss info is larger than buffer. Discarding\n");
@@ -2806,27 +2805,28 @@
 		band = wiphy->bands[NL80211_BAND_5GHZ];
 
 	freq = ieee80211_channel_to_frequency(channel, band->band);
-	notify_channel = ieee80211_get_channel(wiphy, freq);
+	bss_data.chan = ieee80211_get_channel(wiphy, freq);
+	bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20;
+	bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());
 
 	notify_capability = le16_to_cpu(bi->capability);
 	notify_interval = le16_to_cpu(bi->beacon_period);
 	notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
 	notify_ielen = le32_to_cpu(bi->ie_length);
-	notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+	bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100;
 
 	brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
 	brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
 	brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
 	brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
-	brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
+	brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal);
 
-	bss = cfg80211_inform_bss(wiphy, notify_channel,
-				  CFG80211_BSS_FTYPE_UNKNOWN,
-				  (const u8 *)bi->BSSID,
-				  0, notify_capability,
-				  notify_interval, notify_ie,
-				  notify_ielen, notify_signal,
-				  GFP_KERNEL);
+	bss = cfg80211_inform_bss_data(wiphy, &bss_data,
+				       CFG80211_BSS_FTYPE_UNKNOWN,
+				       (const u8 *)bi->BSSID,
+				       0, notify_capability,
+				       notify_interval, notify_ie,
+				       notify_ielen, GFP_KERNEL);
 
 	if (!bss)
 		return -ENOMEM;
@@ -6798,7 +6798,7 @@
 	int i;
 
 	/* ignore non-ISO3166 country codes */
-	for (i = 0; i < sizeof(req->alpha2); i++)
+	for (i = 0; i < 2; i++)
 		if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
 			brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n",
 				  req->alpha2[0], req->alpha2[1]);
diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig
index daa343e..e2d3926 100644
--- a/drivers/net/wireless/cnss2/Kconfig
+++ b/drivers/net/wireless/cnss2/Kconfig
@@ -38,3 +38,27 @@
 	  during system pm.
 	  This config flag controls the feature per target based. The feature
 	  requires CNSS driver support.
+
+config CNSS_QCA6290
+	bool "Enable CNSS QCA6290 chipset specific changes"
+	---help---
+	  This enables the WLAN host driver changes that are specific to
+	  the CNSS QCA6290 chipset.
+	  These changes are needed to support the new hardware architecture
+	  of the CNSS QCA6290 chipset.
+
+config CNSS_QCA6390
+	bool "Enable CNSS QCA6390 chipset specific changes"
+	---help---
+	  This enables the WLAN host driver changes that are specific to
+	  the CNSS QCA6390 chipset.
+	  These changes are needed to support the new hardware architecture
+	  of the CNSS QCA6390 chipset.
+
+config CNSS_EMULATION
+	bool "Enable specific changes for emulation hardware"
+	---help---
+	  This enables the WLAN driver changes that are specific to
+	  emulation hardware.
+	  These changes are needed for the WLAN drivers to support and meet
+	  the requirements of emulation hardware.
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index b49d089..318076f 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_CNSS2) += cnss2.o
 
 cnss2-y := main.o
+cnss2-y += bus.o
 cnss2-y += debug.o
 cnss2-y += pci.o
 cnss2-y += power.o
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
new file mode 100644
index 0000000..6a8e67c
--- /dev/null
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -0,0 +1,335 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "bus.h"
+#include "debug.h"
+#include "pci.h"
+
+enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
+{
+	if (!dev)
+		return CNSS_BUS_NONE;
+
+	if (!dev->bus)
+		return CNSS_BUS_NONE;
+
+	if (memcmp(dev->bus->name, "pci", 3) == 0)
+		return CNSS_BUS_PCI;
+	else
+		return CNSS_BUS_NONE;
+}
+
+enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id)
+{
+	switch (device_id) {
+	case QCA6174_DEVICE_ID:
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		return CNSS_BUS_PCI;
+	default:
+		cnss_pr_err("Unknown device_id: 0x%lx\n", device_id);
+		return CNSS_BUS_NONE;
+	}
+}
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev)
+{
+	if (!dev)
+		return NULL;
+
+	switch (cnss_get_dev_bus_type(dev)) {
+	case CNSS_BUS_PCI:
+		return cnss_get_pci_priv(to_pci_dev(dev));
+	default:
+		return NULL;
+	}
+}
+
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
+{
+	void *bus_priv;
+
+	if (!dev)
+		return cnss_get_plat_priv(NULL);
+
+	bus_priv = cnss_bus_dev_to_bus_priv(dev);
+	if (!bus_priv)
+		return NULL;
+
+	switch (cnss_get_dev_bus_type(dev)) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_priv_to_plat_priv(bus_priv);
+	default:
+		return NULL;
+	}
+}
+
+int cnss_bus_init(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_init(plat_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+void cnss_bus_deinit(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		cnss_pci_deinit(plat_priv);
+		break;
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
+int cnss_bus_load_m3(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_load_m3(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_get_wake_msi(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_force_fw_assert_hdlr(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+void cnss_bus_fw_boot_timeout_hdlr(unsigned long data)
+{
+	struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
+
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_fw_boot_timeout_hdlr(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
+void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv, bool in_panic)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_collect_dump_info(plat_priv->bus_priv,
+						  in_panic);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
+int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_call_driver_probe(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_call_driver_remove(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_powerup(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_shutdown(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_crash_shutdown(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_dev_ramdump(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_dev_ramdump(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_register_driver_hdlr(plat_priv->bus_priv, data);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_unregister_driver_hdlr(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
+				      int modem_current_status)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_call_driver_modem_status(plat_priv->bus_priv,
+							 modem_current_status);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
new file mode 100644
index 0000000..532438a
--- /dev/null
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_BUS_H
+#define _CNSS_BUS_H
+
+#include "main.h"
+
+#define QCA6174_VENDOR_ID		0x168C
+#define QCA6174_DEVICE_ID		0x003E
+#define QCA6174_REV_ID_OFFSET		0x08
+#define QCA6174_REV3_VERSION		0x5020000
+#define QCA6174_REV3_2_VERSION		0x5030000
+#define QCA6290_VENDOR_ID		0x17CB
+#define QCA6290_DEVICE_ID		0x1100
+#define QCA6290_EMULATION_VENDOR_ID	0x168C
+#define QCA6290_EMULATION_DEVICE_ID	0xABCD
+
+enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev);
+enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id);
+void *cnss_bus_dev_to_bus_priv(struct device *dev);
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+int cnss_bus_init(struct cnss_plat_data *plat_priv);
+void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
+int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
+u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
+int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
+void cnss_bus_fw_boot_timeout_hdlr(unsigned long data);
+void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv,
+				bool in_panic);
+int cnss_bus_call_driver_probe(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_remove(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_powerup(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_shutdown(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_crash_shutdown(struct cnss_plat_data *plat_priv);
+int cnss_bus_dev_ramdump(struct cnss_plat_data *plat_priv);
+int cnss_bus_register_driver_hdlr(struct cnss_plat_data *plat_priv, void *data);
+int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv);
+int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
+				      int modem_current_status);
+
+#endif /* _CNSS_BUS_H */
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index e4efb98..76ad51c 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -23,8 +23,8 @@
 #include <soc/qcom/subsystem_notif.h>
 
 #include "main.h"
+#include "bus.h"
 #include "debug.h"
-#include "pci.h"
 
 #define CNSS_DUMP_FORMAT_VER		0x11
 #define CNSS_DUMP_FORMAT_VER_V2		0x22
@@ -37,7 +37,6 @@
 #define FW_READY_TIMEOUT		20000
 #define FW_ASSERT_TIMEOUT		5000
 #define CNSS_EVENT_PENDING		2989
-#define WAKE_MSI_NAME			"WAKE"
 
 static struct cnss_plat_data *plat_env;
 
@@ -55,13 +54,6 @@
 MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
 #endif
 
-enum cnss_debug_quirks {
-	LINK_DOWN_SELF_RECOVERY,
-	SKIP_DEVICE_BOOT,
-	USE_CORE_ONLY_FW,
-	SKIP_RECOVERY,
-};
-
 unsigned long quirks;
 #ifdef CONFIG_CNSS2_DEBUG
 module_param(quirks, ulong, 0600);
@@ -87,64 +79,17 @@
 	void *data;
 };
 
-static enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
-{
-	if (!dev)
-		return CNSS_BUS_NONE;
-
-	if (!dev->bus)
-		return CNSS_BUS_NONE;
-
-	if (memcmp(dev->bus->name, "pci", 3) == 0)
-		return CNSS_BUS_PCI;
-	else
-		return CNSS_BUS_NONE;
-}
-
 static void cnss_set_plat_priv(struct platform_device *plat_dev,
 			       struct cnss_plat_data *plat_priv)
 {
 	plat_env = plat_priv;
 }
 
-static struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
-						 *plat_dev)
+struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev)
 {
 	return plat_env;
 }
 
-void *cnss_bus_dev_to_bus_priv(struct device *dev)
-{
-	if (!dev)
-		return NULL;
-
-	switch (cnss_get_dev_bus_type(dev)) {
-	case CNSS_BUS_PCI:
-		return cnss_get_pci_priv(to_pci_dev(dev));
-	default:
-		return NULL;
-	}
-}
-
-struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
-{
-	void *bus_priv;
-
-	if (!dev)
-		return cnss_get_plat_priv(NULL);
-
-	bus_priv = cnss_bus_dev_to_bus_priv(dev);
-	if (!bus_priv)
-		return NULL;
-
-	switch (cnss_get_dev_bus_type(dev)) {
-	case CNSS_BUS_PCI:
-		return cnss_pci_priv_to_plat_priv(bus_priv);
-	default:
-		return NULL;
-	}
-}
-
 static int cnss_pm_notify(struct notifier_block *b,
 			  unsigned long event, void *p)
 {
@@ -274,23 +219,6 @@
 }
 EXPORT_SYMBOL(cnss_get_platform_cap);
 
-int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
-
-	if (!plat_priv)
-		return -ENODEV;
-
-	ret = cnss_pci_get_bar_info(bus_priv, &info->va, &info->pa);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-EXPORT_SYMBOL(cnss_get_soc_info);
-
 void cnss_request_pm_qos(struct device *dev, u32 qos_val)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
@@ -506,22 +434,14 @@
 }
 EXPORT_SYMBOL(cnss_set_fw_log_mode);
 
-u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv)
+bool *cnss_get_qmi_bypass(void)
 {
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-	int ret, num_vectors;
-	u32 user_base_data, base_vector;
+	return &qmi_bypass;
+}
 
-	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
-					   WAKE_MSI_NAME, &num_vectors,
-					   &user_base_data, &base_vector);
-
-	if (ret) {
-		cnss_pr_err("WAKE MSI is not valid\n");
-		return 0;
-	}
-
-	return user_base_data;
+unsigned long *cnss_get_debug_quirks(void)
+{
+	return &quirks;
 }
 
 static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
@@ -541,7 +461,7 @@
 	if (ret)
 		goto out;
 
-	ret = cnss_pci_load_m3(plat_priv->bus_priv);
+	ret = cnss_bus_load_m3(plat_priv);
 	if (ret)
 		goto out;
 
@@ -554,79 +474,6 @@
 	return ret;
 }
 
-static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-		cnss_pr_dbg("Skip driver probe\n");
-		goto out;
-	}
-
-	if (!plat_priv->driver_ops) {
-		cnss_pr_err("driver_ops is NULL\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
-	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
-		ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev,
-						    pci_priv->pci_device_id);
-		if (ret) {
-			cnss_pr_err("Failed to reinit host driver, err = %d\n",
-				    ret);
-			goto out;
-		}
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
-		ret = plat_priv->driver_ops->probe(pci_priv->pci_dev,
-						   pci_priv->pci_device_id);
-		if (ret) {
-			cnss_pr_err("Failed to probe host driver, err = %d\n",
-				    ret);
-			goto out;
-		}
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
-		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
-	}
-
-	return 0;
-
-out:
-	return ret;
-}
-
-static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
-	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
-	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
-		cnss_pr_dbg("Skip driver remove\n");
-		return 0;
-	}
-
-	if (!plat_priv->driver_ops) {
-		cnss_pr_err("driver_ops is NULL\n");
-		return -EINVAL;
-	}
-
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
-	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
-		plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
-	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
-		plat_priv->driver_ops->remove(pci_priv->pci_dev);
-		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
-	}
-
-	return 0;
-}
-
 static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -650,7 +497,7 @@
 						    QMI_WLFW_CALIBRATION_V01);
 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
 		   test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		ret = cnss_driver_call_probe(plat_priv);
+		ret = cnss_bus_call_driver_probe(plat_priv);
 	} else {
 		complete(&plat_priv->power_up_complete);
 	}
@@ -663,9 +510,7 @@
 	return 0;
 
 shutdown:
-	cnss_pci_stop_mhi(plat_priv->bus_priv);
-	cnss_suspend_pci_link(plat_priv->bus_priv);
-	cnss_power_off_device(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
 
 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
@@ -837,44 +682,6 @@
 }
 EXPORT_SYMBOL(cnss_power_down);
 
-int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
-
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (plat_priv->driver_ops) {
-		cnss_pr_err("Driver has already registered!\n");
-		return -EEXIST;
-	}
-
-	ret = cnss_driver_event_post(plat_priv,
-				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
-				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
-				     driver_ops);
-	return ret;
-}
-EXPORT_SYMBOL(cnss_wlan_register_driver);
-
-void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
-{
-	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
-
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return;
-	}
-
-	cnss_driver_event_post(plat_priv,
-			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
-			       CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
-}
-EXPORT_SYMBOL(cnss_wlan_unregister_driver);
-
 static int cnss_get_resources(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -906,13 +713,11 @@
 {
 	struct cnss_plat_data *plat_priv =
 		container_of(nb, struct cnss_plat_data, modem_nb);
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
 	struct cnss_esoc_info *esoc_info;
-	struct cnss_wlan_driver *driver_ops;
 
 	cnss_pr_dbg("Modem notifier: event %lu\n", code);
 
-	if (!pci_priv)
+	if (!plat_priv)
 		return NOTIFY_DONE;
 
 	esoc_info = &plat_priv->esoc_info;
@@ -924,13 +729,10 @@
 	else
 		return NOTIFY_DONE;
 
-	driver_ops = plat_priv->driver_ops;
-	if (!driver_ops || !driver_ops->modem_status)
+	if (!cnss_bus_call_driver_modem_status(plat_priv,
+					       esoc_info->modem_current_status))
 		return NOTIFY_DONE;
 
-	driver_ops->modem_status(pci_priv->pci_dev,
-				 esoc_info->modem_current_status);
-
 	return NOTIFY_OK;
 }
 
@@ -1004,237 +806,6 @@
 		devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
 }
 
-static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	ret = cnss_power_on_device(plat_priv);
-	if (ret) {
-		cnss_pr_err("Failed to power on device, err = %d\n", ret);
-		goto out;
-	}
-
-	ret = cnss_resume_pci_link(pci_priv);
-	if (ret) {
-		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
-		goto power_off;
-	}
-
-	ret = cnss_driver_call_probe(plat_priv);
-	if (ret)
-		goto suspend_link;
-
-	return 0;
-suspend_link:
-	cnss_suspend_pci_link(pci_priv);
-power_off:
-	cnss_power_off_device(plat_priv);
-out:
-	return ret;
-}
-
-static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!pci_priv)
-		return -ENODEV;
-
-	cnss_pm_request_resume(pci_priv);
-
-	cnss_driver_call_remove(plat_priv);
-
-	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
-				   CNSS_BUS_WIDTH_NONE);
-	cnss_pci_set_monitor_wake_intr(pci_priv, false);
-	cnss_pci_set_auto_suspended(pci_priv, 0);
-
-	ret = cnss_suspend_pci_link(pci_priv);
-	if (ret)
-		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
-
-	cnss_power_off_device(plat_priv);
-
-	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
-
-	return ret;
-}
-
-static void cnss_qca6174_crash_shutdown(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!plat_priv->driver_ops)
-		return;
-
-	plat_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
-}
-
-static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-	unsigned int timeout;
-
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (plat_priv->ramdump_info_v2.dump_data_valid ||
-	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
-		cnss_pci_clear_dump_info(pci_priv);
-	}
-
-	ret = cnss_power_on_device(plat_priv);
-	if (ret) {
-		cnss_pr_err("Failed to power on device, err = %d\n", ret);
-		goto out;
-	}
-
-	ret = cnss_resume_pci_link(pci_priv);
-	if (ret) {
-		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
-		goto power_off;
-	}
-
-	timeout = cnss_get_qmi_timeout();
-
-	ret = cnss_pci_start_mhi(pci_priv);
-	if (ret) {
-		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
-		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
-		    !pci_priv->pci_link_down_ind && timeout)
-			mod_timer(&plat_priv->fw_boot_timer,
-				  jiffies + msecs_to_jiffies(timeout >> 1));
-		return 0;
-	}
-
-	if (test_bit(USE_CORE_ONLY_FW, &quirks)) {
-		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
-		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
-		return 0;
-	}
-
-	cnss_set_pin_connect_status(plat_priv);
-
-	if (qmi_bypass) {
-		ret = cnss_driver_call_probe(plat_priv);
-		if (ret)
-			goto stop_mhi;
-	} else if (timeout) {
-		mod_timer(&plat_priv->fw_boot_timer,
-			  jiffies + msecs_to_jiffies(timeout << 1));
-	}
-
-	return 0;
-
-stop_mhi:
-	cnss_pci_stop_mhi(pci_priv);
-	cnss_suspend_pci_link(pci_priv);
-power_off:
-	cnss_power_off_device(plat_priv);
-out:
-	return ret;
-}
-
-static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	if (!pci_priv)
-		return -ENODEV;
-
-	cnss_pm_request_resume(pci_priv);
-
-	cnss_driver_call_remove(plat_priv);
-
-	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
-				   CNSS_BUS_WIDTH_NONE);
-	cnss_pci_set_monitor_wake_intr(pci_priv, false);
-	cnss_pci_set_auto_suspended(pci_priv, 0);
-
-	cnss_pci_stop_mhi(pci_priv);
-
-	ret = cnss_suspend_pci_link(pci_priv);
-	if (ret)
-		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
-
-	cnss_power_off_device(plat_priv);
-
-	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
-	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
-	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
-
-	return ret;
-}
-
-static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
-		    plat_priv->driver_state);
-
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pr_dbg("Ignore crash shutdown\n");
-		return;
-	}
-
-	cnss_pci_collect_dump_info(pci_priv, true);
-}
-
-static int cnss_powerup(struct cnss_plat_data *plat_priv)
-{
-	int ret;
-
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_powerup(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_powerup(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-		ret = -ENODEV;
-	}
-
-	return ret;
-}
-
-static int cnss_shutdown(struct cnss_plat_data *plat_priv)
-{
-	int ret;
-
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_shutdown(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_shutdown(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-		ret = -ENODEV;
-	}
-
-	return ret;
-}
-
 static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
 {
 	struct cnss_plat_data *plat_priv;
@@ -1255,7 +826,7 @@
 		return 0;
 	}
 
-	return cnss_powerup(plat_priv);
+	return cnss_bus_dev_powerup(plat_priv);
 }
 
 static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
@@ -1279,110 +850,9 @@
 		return 0;
 	}
 
-	return cnss_shutdown(plat_priv);
+	return cnss_bus_dev_shutdown(plat_priv);
 }
 
-static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
-	struct cnss_dump_data *dump_data = &info_v2->dump_data;
-	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
-	struct ramdump_segment *ramdump_segs, *s;
-	int i, ret = 0;
-
-	if (!info_v2->dump_data_valid ||
-	    dump_data->nentries == 0)
-		return 0;
-
-	ramdump_segs = kcalloc(dump_data->nentries,
-			       sizeof(*ramdump_segs),
-			       GFP_KERNEL);
-	if (!ramdump_segs)
-		return -ENOMEM;
-
-	s = ramdump_segs;
-	for (i = 0; i < dump_data->nentries; i++) {
-		s->address = dump_seg->address;
-		s->v_address = dump_seg->v_address;
-		s->size = dump_seg->size;
-		s++;
-		dump_seg++;
-	}
-
-	ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
-			     dump_data->nentries);
-	kfree(ramdump_segs);
-
-	cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
-	cnss_pci_clear_dump_info(plat_priv->bus_priv);
-
-	return ret;
-}
-
-static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv)
-{
-	int ret = 0;
-	struct cnss_ramdump_info *ramdump_info;
-	struct ramdump_segment segment;
-
-	ramdump_info = &plat_priv->ramdump_info;
-	if (!ramdump_info->ramdump_size)
-		return -EINVAL;
-
-	memset(&segment, 0, sizeof(segment));
-	segment.v_address = ramdump_info->ramdump_va;
-	segment.size = ramdump_info->ramdump_size;
-	ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
-
-	return ret;
-}
-
-static int cnss_subsys_ramdump(int enable,
-			       const struct subsys_desc *subsys_desc)
-{
-	int ret = 0;
-	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
-
-	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
-		return -ENODEV;
-	}
-
-	if (!enable)
-		return 0;
-
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_ramdump(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_ramdump(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-		ret = -ENODEV;
-	}
-
-	return ret;
-}
-
-void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
-{
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct cnss_ramdump_info *ramdump_info;
-
-	if (!plat_priv)
-		return NULL;
-
-	ramdump_info = &plat_priv->ramdump_info;
-	*size = ramdump_info->ramdump_size;
-
-	return ramdump_info->ramdump_va;
-}
-EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
-
 void cnss_device_crashed(struct device *dev)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
@@ -1405,24 +875,44 @@
 	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
 
 	if (!plat_priv) {
-		cnss_pr_err("plat_priv is NULL!\n");
+		cnss_pr_err("plat_priv is NULL\n");
 		return;
 	}
 
-	switch (plat_priv->device_id) {
-	case QCA6174_DEVICE_ID:
-		cnss_qca6174_crash_shutdown(plat_priv);
-		break;
-	case QCA6290_EMULATION_DEVICE_ID:
-	case QCA6290_DEVICE_ID:
-		cnss_qca6290_crash_shutdown(plat_priv);
-		break;
-	default:
-		cnss_pr_err("Unknown device_id found: 0x%lx\n",
-			    plat_priv->device_id);
-	}
+	cnss_bus_dev_crash_shutdown(plat_priv);
 }
 
+static int cnss_subsys_ramdump(int enable,
+			       const struct subsys_desc *subsys_desc)
+{
+	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!enable)
+		return 0;
+
+	return cnss_bus_dev_ramdump(plat_priv);
+}
+
+void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct cnss_ramdump_info *ramdump_info;
+
+	if (!plat_priv)
+		return NULL;
+
+	ramdump_info = &plat_priv->ramdump_info;
+	*size = ramdump_info->ramdump_size;
+
+	return ramdump_info->ramdump_va;
+}
+EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
+
 static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
 {
 	switch (reason) {
@@ -1442,7 +932,6 @@
 static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
 			    enum cnss_recovery_reason reason)
 {
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
 	struct cnss_subsys_info *subsys_info =
 		&plat_priv->subsys_info;
 
@@ -1451,11 +940,6 @@
 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
 		goto self_recovery;
 
-	if (plat_priv->driver_ops &&
-	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state))
-		plat_priv->driver_ops->update_status(pci_priv->pci_dev,
-						     CNSS_RECOVERY);
-
 	if (test_bit(SKIP_RECOVERY, &quirks)) {
 		cnss_pr_dbg("Skip device recovery\n");
 		return 0;
@@ -1468,7 +952,7 @@
 		break;
 	case CNSS_REASON_RDDM:
 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
-		cnss_pci_collect_dump_info(pci_priv, false);
+		cnss_bus_collect_dump_info(plat_priv, false);
 		break;
 	case CNSS_REASON_DEFAULT:
 	case CNSS_REASON_TIMEOUT:
@@ -1488,8 +972,8 @@
 	return 0;
 
 self_recovery:
-	cnss_shutdown(plat_priv);
-	cnss_powerup(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
+	cnss_bus_dev_powerup(plat_priv);
 
 	return 0;
 }
@@ -1575,28 +1059,6 @@
 }
 EXPORT_SYMBOL(cnss_schedule_recovery);
 
-static int cnss_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
-{
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-	int ret;
-
-	ret = cnss_pci_set_mhi_state(plat_priv->bus_priv,
-				     CNSS_MHI_TRIGGER_RDDM);
-	if (ret) {
-		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
-		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
-				       CNSS_REASON_DEFAULT);
-		return 0;
-	}
-
-	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
-		mod_timer(&plat_priv->fw_boot_timer,
-			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
-	}
-
-	return 0;
-}
-
 int cnss_force_fw_assert(struct device *dev)
 {
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
@@ -1624,49 +1086,12 @@
 }
 EXPORT_SYMBOL(cnss_force_fw_assert);
 
-void fw_boot_timeout(unsigned long data)
-{
-	struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
-	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
-
-	cnss_pr_err("Timeout waiting for FW ready indication!\n");
-
-	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
-			       CNSS_REASON_TIMEOUT);
-}
-
-static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
-				     void *data)
-{
-	int ret = 0;
-
-	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
-	plat_priv->driver_ops = data;
-
-	ret = cnss_powerup(plat_priv);
-	if (ret) {
-		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
-		plat_priv->driver_ops = NULL;
-	}
-
-	return ret;
-}
-
-static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
-{
-	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
-	cnss_shutdown(plat_priv);
-	plat_priv->driver_ops = NULL;
-
-	return 0;
-}
-
 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
 
 	set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
-	ret = cnss_powerup(plat_priv);
+	ret = cnss_bus_dev_powerup(plat_priv);
 	if (ret)
 		clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
@@ -1677,7 +1102,7 @@
 {
 	plat_priv->cal_done = true;
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
-	cnss_shutdown(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
 	return 0;
@@ -1685,12 +1110,12 @@
 
 static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
 {
-	return cnss_powerup(plat_priv);
+	return cnss_bus_dev_powerup(plat_priv);
 }
 
 static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
 {
-	cnss_shutdown(plat_priv);
+	cnss_bus_dev_shutdown(plat_priv);
 
 	return 0;
 }
@@ -1731,7 +1156,7 @@
 			ret = cnss_wlfw_server_exit(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_REQUEST_MEM:
-			ret = cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+			ret = cnss_bus_alloc_fw_mem(plat_priv);
 			if (ret)
 				break;
 			ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
@@ -1749,18 +1174,18 @@
 			ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
-			ret = cnss_register_driver_hdlr(plat_priv,
-							event->data);
+			ret = cnss_bus_register_driver_hdlr(plat_priv,
+							    event->data);
 			break;
 		case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
-			ret = cnss_unregister_driver_hdlr(plat_priv);
+			ret = cnss_bus_unregister_driver_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_RECOVERY:
 			ret = cnss_driver_recovery_hdlr(plat_priv,
 							event->data);
 			break;
 		case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
-			ret = cnss_force_fw_assert_hdlr(plat_priv);
+			ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
 			break;
 		case CNSS_DRIVER_EVENT_POWER_UP:
 			ret = cnss_power_up_hdlr(plat_priv);
@@ -1862,7 +1287,7 @@
 	return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
 }
 
-static int cnss_qca6174_register_ramdump(struct cnss_plat_data *plat_priv)
+static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
 	struct device *dev;
@@ -1913,7 +1338,7 @@
 	return ret;
 }
 
-static void cnss_qca6174_unregister_ramdump(struct cnss_plat_data *plat_priv)
+static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
 {
 	struct device *dev;
 	struct cnss_ramdump_info *ramdump_info;
@@ -1930,7 +1355,7 @@
 				  ramdump_info->ramdump_pa);
 }
 
-static int cnss_qca6290_register_ramdump(struct cnss_plat_data *plat_priv)
+static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
 	struct cnss_subsys_info *subsys_info;
@@ -1986,7 +1411,7 @@
 	return ret;
 }
 
-static void cnss_qca6290_unregister_ramdump(struct cnss_plat_data *plat_priv)
+static void cnss_unregister_ramdump_v2(struct cnss_plat_data *plat_priv)
 {
 	struct cnss_ramdump_info_v2 *info_v2;
 
@@ -2006,11 +1431,11 @@
 
 	switch (plat_priv->device_id) {
 	case QCA6174_DEVICE_ID:
-		ret = cnss_qca6174_register_ramdump(plat_priv);
+		ret = cnss_register_ramdump_v1(plat_priv);
 		break;
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
-		ret = cnss_qca6290_register_ramdump(plat_priv);
+		ret = cnss_register_ramdump_v2(plat_priv);
 		break;
 	default:
 		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
@@ -2024,11 +1449,11 @@
 {
 	switch (plat_priv->device_id) {
 	case QCA6174_DEVICE_ID:
-		cnss_qca6174_unregister_ramdump(plat_priv);
+		cnss_unregister_ramdump_v1(plat_priv);
 		break;
 	case QCA6290_EMULATION_DEVICE_ID:
 	case QCA6290_DEVICE_ID:
-		cnss_qca6290_unregister_ramdump(plat_priv);
+		cnss_unregister_ramdump_v2(plat_priv);
 		break;
 	default:
 		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
@@ -2204,6 +1629,7 @@
 
 	plat_priv->plat_dev = plat_dev;
 	plat_priv->device_id = device_id->driver_data;
+	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
 
@@ -2216,14 +1642,14 @@
 		if (ret)
 			goto free_res;
 
-		ret = cnss_pci_init(plat_priv);
+		ret = cnss_bus_init(plat_priv);
 		if (ret)
 			goto power_off;
 	}
 
 	ret = cnss_register_esoc(plat_priv);
 	if (ret)
-		goto deinit_pci;
+		goto deinit_bus;
 
 	ret = cnss_register_bus_scale(plat_priv);
 	if (ret)
@@ -2245,8 +1671,8 @@
 	if (ret)
 		goto deinit_qmi;
 
-	setup_timer(&plat_priv->fw_boot_timer,
-		    fw_boot_timeout, (unsigned long)plat_priv);
+	setup_timer(&plat_priv->fw_boot_timer, cnss_bus_fw_boot_timeout_hdlr,
+		    (unsigned long)plat_priv);
 
 	register_pm_notifier(&cnss_pm_notifier);
 
@@ -2272,9 +1698,9 @@
 	cnss_unregister_bus_scale(plat_priv);
 unreg_esoc:
 	cnss_unregister_esoc(plat_priv);
-deinit_pci:
+deinit_bus:
 	if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
-		cnss_pci_deinit(plat_priv);
+		cnss_bus_deinit(plat_priv);
 power_off:
 	if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
 		cnss_power_off_device(plat_priv);
@@ -2301,7 +1727,7 @@
 	cnss_remove_sysfs(plat_priv);
 	cnss_unregister_bus_scale(plat_priv);
 	cnss_unregister_esoc(plat_priv);
-	cnss_pci_deinit(plat_priv);
+	cnss_bus_deinit(plat_priv);
 	cnss_put_resources(plat_priv);
 	platform_set_drvdata(plat_dev, NULL);
 	plat_env = NULL;
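
With driver_ops and the device power sequences gone from main.c, the file exports its module parameters through cnss_get_qmi_bypass() and cnss_get_debug_quirks() instead of sharing globals with the PCI layer. A minimal sketch of that accessor pattern (illustrative names; a plain shift stands in for the kernel's test_bit()):

#include <stdbool.h>
#include <stdio.h>

/* file-scope state, analogous to the module parameters in main.c */
static unsigned long quirks;
static bool qmi_bypass;

/* accessors let other files read the flags without extern globals */
static unsigned long *get_debug_quirks(void) { return &quirks; }
static bool *get_qmi_bypass(void) { return &qmi_bypass; }

#define SKIP_RECOVERY_BIT 3

static bool test_quirk(int bit)
{
	return (*get_debug_quirks() >> bit) & 1UL;
}

int main(void)
{
	quirks |= 1UL << SKIP_RECOVERY_BIT;
	*get_qmi_bypass() = true;

	printf("skip recovery: %d, qmi bypass: %d\n",
	       test_quirk(SKIP_RECOVERY_BIT), *get_qmi_bypass());
	return 0;
}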
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index b62c014..509974a0 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -170,9 +170,17 @@
 	u32 host_pin_result;
 };
 
+enum cnss_debug_quirks {
+	LINK_DOWN_SELF_RECOVERY,
+	SKIP_DEVICE_BOOT,
+	USE_CORE_ONLY_FW,
+	SKIP_RECOVERY,
+};
+
 struct cnss_plat_data {
 	struct platform_device *plat_dev;
 	void *bus_priv;
+	enum cnss_dev_bus_type bus_type;
 	struct cnss_vreg_info *vreg_info;
 	struct cnss_pinctrl_info pinctrl_info;
 	struct cnss_subsys_info subsys_info;
@@ -184,7 +192,6 @@
 	struct cnss_platform_cap cap;
 	struct pm_qos_request qos_request;
 	unsigned long device_id;
-	struct cnss_wlan_driver *driver_ops;
 	enum cnss_driver_status driver_status;
 	u32 recovery_count;
 	unsigned long driver_state;
@@ -215,8 +222,9 @@
 	bool cal_done;
 };
 
-void *cnss_bus_dev_to_bus_priv(struct device *dev);
-struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
+bool *cnss_get_qmi_bypass(void);
+unsigned long *cnss_get_debug_quirks(void);
 int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
 			   enum cnss_driver_event_type type,
 			   u32 flags, void *data);
@@ -229,6 +237,5 @@
 int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
 void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
 void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
-u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
 
 #endif /* _CNSS_MAIN_H */
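
plat_priv->bus_type is derived once from the device ID at probe time (see the main.c hunk above), so the cnss_bus_* wrappers never have to re-inspect the device. The body of cnss_get_bus_type() is not shown in this section; one plausible shape of such an ID-to-bus mapping, sketched with stand-in IDs:

#include <stdio.h>

enum bus_type { BUS_NONE, BUS_PCI };

/* stand-in device IDs for illustration only */
#define DEV_ID_A 0x003E
#define DEV_ID_B 0x1100

/* map a device ID to the bus it lives on, once, at probe time */
static enum bus_type get_bus_type(unsigned long device_id)
{
	switch (device_id) {
	case DEV_ID_A:
	case DEV_ID_B:
		return BUS_PCI;
	default:
		return BUS_NONE;
	}
}

int main(void)
{
	printf("0x%lx -> bus %d\n", (unsigned long)DEV_ID_B,
	       get_bus_type(DEV_ID_B));
	return 0;
}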
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 4726750..e1704fb 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -17,8 +17,10 @@
 #include <linux/of.h>
 #include <linux/pm_runtime.h>
 #include <linux/memblock.h>
+#include <soc/qcom/ramdump.h>
 
 #include "main.h"
+#include "bus.h"
 #include "debug.h"
 #include "pci.h"
 
@@ -43,6 +45,10 @@
 #define MAX_M3_FILE_NAME_LENGTH		13
 #define DEFAULT_M3_FILE_NAME		"m3.bin"
 
+#define WAKE_MSI_NAME			"WAKE"
+
+#define FW_ASSERT_TIMEOUT		5000
+
 static DEFINE_SPINLOCK(pci_link_down_lock);
 
 static unsigned int pci_link_down_panic;
@@ -222,6 +228,510 @@
 }
 EXPORT_SYMBOL(cnss_pci_link_down);
 
+int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	plat_priv = pci_priv->plat_priv;
+
+	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		cnss_pr_dbg("Skip driver probe\n");
+		goto out;
+	}
+
+	if (!pci_priv->driver_ops) {
+		cnss_pr_err("driver_ops is NULL\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
+						   pci_priv->pci_device_id);
+		if (ret) {
+			cnss_pr_err("Failed to reinit host driver, err = %d\n",
+				    ret);
+			goto out;
+		}
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
+						  pci_priv->pci_device_id);
+		if (ret) {
+			cnss_pr_err("Failed to probe host driver, err = %d\n",
+				    ret);
+			goto out;
+		}
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	plat_priv = pci_priv->plat_priv;
+
+	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Skip driver remove\n");
+		return 0;
+	}
+
+	if (!pci_priv->driver_ops) {
+		cnss_pr_err("driver_ops is NULL\n");
+		return -EINVAL;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
+	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+		pci_priv->driver_ops->remove(pci_priv->pci_dev);
+		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+	}
+
+	return 0;
+}
+
+int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
+				      int modem_current_status)
+{
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	driver_ops = pci_priv->driver_ops;
+	if (!driver_ops || !driver_ops->modem_status)
+		return -EINVAL;
+
+	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
+
+	return 0;
+}
+
+static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	ret = cnss_power_on_device(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to power on device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_resume_pci_link(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		goto power_off;
+	}
+
+	ret = cnss_pci_call_driver_probe(pci_priv);
+	if (ret)
+		goto suspend_link;
+
+	return 0;
+suspend_link:
+	cnss_suspend_pci_link(pci_priv);
+power_off:
+	cnss_power_off_device(plat_priv);
+out:
+	return ret;
+}
+
+static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	cnss_pm_request_resume(pci_priv);
+
+	cnss_pci_call_driver_remove(pci_priv);
+
+	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+				   CNSS_BUS_WIDTH_NONE);
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+	cnss_pci_set_auto_suspended(pci_priv, 0);
+
+	ret = cnss_suspend_pci_link(pci_priv);
+	if (ret)
+		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+	cnss_power_off_device(plat_priv);
+
+	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+	return ret;
+}
+
+static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
+		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
+}
+
+static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_ramdump_info *ramdump_info;
+	struct ramdump_segment segment;
+
+	ramdump_info = &plat_priv->ramdump_info;
+	if (!ramdump_info->ramdump_size)
+		return -EINVAL;
+
+	memset(&segment, 0, sizeof(segment));
+	segment.v_address = ramdump_info->ramdump_va;
+	segment.size = ramdump_info->ramdump_size;
+	ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
+
+	return ret;
+}
+
+static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	unsigned int timeout;
+
+	if (plat_priv->ramdump_info_v2.dump_data_valid ||
+	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+		cnss_pci_clear_dump_info(pci_priv);
+	}
+
+	ret = cnss_power_on_device(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to power on device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_resume_pci_link(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		goto power_off;
+	}
+
+	timeout = cnss_get_qmi_timeout();
+
+	ret = cnss_pci_start_mhi(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+		    !pci_priv->pci_link_down_ind && timeout)
+			mod_timer(&plat_priv->fw_boot_timer,
+				  jiffies + msecs_to_jiffies(timeout >> 1));
+		return 0;
+	}
+
+	if (test_bit(USE_CORE_ONLY_FW, cnss_get_debug_quirks())) {
+		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		return 0;
+	}
+
+	cnss_set_pin_connect_status(plat_priv);
+
+	if (*cnss_get_qmi_bypass()) {
+		ret = cnss_pci_call_driver_probe(pci_priv);
+		if (ret)
+			goto stop_mhi;
+	} else if (timeout) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(timeout << 1));
+	}
+
+	return 0;
+
+stop_mhi:
+	cnss_pci_stop_mhi(pci_priv);
+	cnss_suspend_pci_link(pci_priv);
+power_off:
+	cnss_power_off_device(plat_priv);
+out:
+	return ret;
+}
+
+static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	cnss_pm_request_resume(pci_priv);
+
+	cnss_pci_call_driver_remove(pci_priv);
+
+	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+				   CNSS_BUS_WIDTH_NONE);
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+	cnss_pci_set_auto_suspended(pci_priv, 0);
+
+	cnss_pci_stop_mhi(pci_priv);
+
+	ret = cnss_suspend_pci_link(pci_priv);
+	if (ret)
+		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+	cnss_power_off_device(plat_priv);
+
+	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+	return ret;
+}
+
+static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
+		    plat_priv->driver_state);
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Ignore crash shutdown\n");
+		return;
+	}
+
+	cnss_pci_collect_dump_info(pci_priv, true);
+}
+
+static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+	struct cnss_dump_data *dump_data = &info_v2->dump_data;
+	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+	struct ramdump_segment *ramdump_segs, *s;
+	int i, ret = 0;
+
+	if (!info_v2->dump_data_valid ||
+	    dump_data->nentries == 0)
+		return 0;
+
+	ramdump_segs = kcalloc(dump_data->nentries,
+			       sizeof(*ramdump_segs),
+			       GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	s = ramdump_segs;
+	for (i = 0; i < dump_data->nentries; i++) {
+		s->address = dump_seg->address;
+		s->v_address = dump_seg->v_address;
+		s->size = dump_seg->size;
+		s++;
+		dump_seg++;
+	}
+
+	ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
+			     dump_data->nentries);
+	kfree(ramdump_segs);
+
+	cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
+	cnss_pci_clear_dump_info(plat_priv->bus_priv);
+
+	return ret;
+}
+
+int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_powerup(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		ret = cnss_qca6290_powerup(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_shutdown(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		ret = cnss_qca6290_shutdown(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		cnss_qca6174_crash_shutdown(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		cnss_qca6290_crash_shutdown(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_ramdump(pci_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		ret = cnss_qca6290_ramdump(pci_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%x\n",
+			    pci_priv->device_id);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+	struct cnss_pci_data *pci_priv;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	pci_priv = plat_priv->bus_priv;
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (pci_priv->driver_ops) {
+		cnss_pr_err("Driver has already registered\n");
+		return -EEXIST;
+	}
+
+	ret = cnss_driver_event_post(plat_priv,
+				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
+				     driver_ops);
+	return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_register_driver);
+
+void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return;
+	}
+
+	cnss_driver_event_post(plat_priv,
+			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+			       CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(cnss_wlan_unregister_driver);
+
+int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
+				  void *data)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+	pci_priv->driver_ops = data;
+
+	ret = cnss_pci_dev_powerup(pci_priv);
+	if (ret) {
+		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+		pci_priv->driver_ops = NULL;
+	}
+
+	return ret;
+}
+
+int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+	cnss_pci_dev_shutdown(pci_priv);
+	pci_priv->driver_ops = NULL;
+
+	return 0;
+}
+
 static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -377,7 +887,6 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
@@ -385,11 +894,7 @@
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		goto out;
-
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->suspend) {
 		ret = driver_ops->suspend(pci_dev, state);
 		if (ret) {
@@ -400,7 +905,7 @@
 		}
 	}
 
-	if (pci_priv->pci_link_state) {
+	if (pci_priv->pci_link_state == PCI_LINK_UP) {
 		ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND);
 		if (ret) {
 			if (driver_ops && driver_ops->resume)
@@ -432,31 +937,29 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		goto out;
-
 	if (pci_priv->pci_link_down_ind)
 		goto out;
 
-	ret = pci_enable_device(pci_dev);
-	if (ret)
-		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+	if (pci_priv->pci_link_state == PCI_LINK_UP) {
+		ret = pci_enable_device(pci_dev);
+		if (ret)
+			cnss_pr_err("Failed to enable PCI device, err = %d\n",
+				    ret);
 
-	if (pci_priv->saved_state)
-		cnss_set_pci_config_space(pci_priv,
-					  RESTORE_PCI_CONFIG_SPACE);
+		if (pci_priv->saved_state)
+			cnss_set_pci_config_space(pci_priv,
+						  RESTORE_PCI_CONFIG_SPACE);
 
-	pci_set_master(pci_dev);
-	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+		pci_set_master(pci_dev);
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+	}
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->resume) {
 		ret = driver_ops->resume(pci_dev);
 		if (ret)
@@ -475,20 +978,20 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		goto out;
-
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->suspend_noirq)
 		ret = driver_ops->suspend_noirq(pci_dev);
 
+	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
+	if (ret)
+		goto out;
+	pci_priv->pci_link_state = PCI_LINK_DOWN;
+
 out:
 	return ret;
 }
@@ -498,17 +1001,17 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		goto out;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
+	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+	if (ret)
 		goto out;
+	pci_priv->pci_link_state = PCI_LINK_UP;
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->resume_noirq &&
 	    !pci_priv->pci_link_down_ind)
 		ret = driver_ops->resume_noirq(pci_dev);
@@ -522,16 +1025,11 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		return -EAGAIN;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		return -EAGAIN;
-
 	if (pci_priv->pci_link_down_ind) {
 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
 		return -EAGAIN;
@@ -539,7 +1037,7 @@
 
 	cnss_pr_dbg("Runtime suspend start\n");
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
 	    driver_ops->runtime_ops->runtime_suspend)
 		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
@@ -554,16 +1052,11 @@
 	int ret = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
-	struct cnss_plat_data *plat_priv;
 	struct cnss_wlan_driver *driver_ops;
 
 	if (!pci_priv)
 		return -EAGAIN;
 
-	plat_priv = pci_priv->plat_priv;
-	if (!plat_priv)
-		return -EAGAIN;
-
 	if (pci_priv->pci_link_down_ind) {
 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
 		return -EAGAIN;
@@ -571,7 +1064,7 @@
 
 	cnss_pr_dbg("Runtime resume start\n");
 
-	driver_ops = plat_priv->driver_ops;
+	driver_ops = pci_priv->driver_ops;
 	if (driver_ops && driver_ops->runtime_ops &&
 	    driver_ops->runtime_ops->runtime_resume)
 		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
@@ -592,18 +1085,7 @@
 
 int cnss_wlan_pm_control(struct device *dev, bool vote)
 {
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct cnss_pci_data *pci_priv;
-	struct pci_dev *pci_dev;
-
-	if (!plat_priv)
-		return -ENODEV;
-
-	pci_priv = plat_priv->bus_priv;
-	if (!pci_priv)
-		return -ENODEV;
-
-	pci_dev = pci_priv->pci_dev;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
 
 	return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
 				   MSM_PCIE_ENABLE_PC,
@@ -615,19 +1097,17 @@
 int cnss_auto_suspend(struct device *dev)
 {
 	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct pci_dev *pci_dev;
-	struct cnss_pci_data *pci_priv;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
 	struct cnss_bus_bw_info *bus_bw_info;
 
-	if (!plat_priv)
-		return -ENODEV;
-
-	pci_priv = plat_priv->bus_priv;
 	if (!pci_priv)
 		return -ENODEV;
 
-	pci_dev = pci_priv->pci_dev;
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
 
 	if (pci_priv->pci_link_state) {
 		if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
@@ -673,19 +1153,18 @@
 int cnss_auto_resume(struct device *dev)
 {
 	int ret = 0;
-	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
-	struct pci_dev *pci_dev;
-	struct cnss_pci_data *pci_priv;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
 	struct cnss_bus_bw_info *bus_bw_info;
 
-	if (!plat_priv)
-		return -ENODEV;
-
-	pci_priv = plat_priv->bus_priv;
 	if (!pci_priv)
 		return -ENODEV;
 
-	pci_dev = pci_priv->pci_dev;
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
+
 	if (!pci_priv->pci_link_state) {
 		cnss_pr_dbg("Resuming PCI link\n");
 		if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
@@ -830,18 +1309,59 @@
 	m3_mem->size = 0;
 }
 
-int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
-			  phys_addr_t *pa)
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
 {
+	int ret;
+	struct cnss_plat_data *plat_priv;
+
 	if (!pci_priv)
 		return -ENODEV;
 
-	*va = pci_priv->bar;
-	*pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -ENODEV;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
+	if (ret) {
+		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+				       CNSS_REASON_DEFAULT);
+		return 0;
+	}
+
+	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
+	}
 
 	return 0;
 }
 
+void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return;
+
+	cnss_pr_err("Timeout waiting for FW ready indication\n");
+
+	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+			       CNSS_REASON_TIMEOUT);
+}
+
+int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
+{
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	info->va = pci_priv->bar;
+	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_get_soc_info);
+
 static struct cnss_msi_config msi_config = {
 	.total_vectors = 32,
 	.total_users = 4,
@@ -927,7 +1447,7 @@
 				 int *num_vectors, u32 *user_base_data,
 				 u32 *base_vector)
 {
-	struct cnss_pci_data *pci_priv = dev_get_drvdata(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
 	struct cnss_msi_config *msi_config;
 	int idx;
 
@@ -986,6 +1506,25 @@
 }
 EXPORT_SYMBOL(cnss_get_msi_address);
 
+u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
+{
+	int ret, num_vectors;
+	u32 user_base_data, base_vector;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+					   WAKE_MSI_NAME, &num_vectors,
+					   &user_base_data, &base_vector);
+	if (ret) {
+		cnss_pr_err("WAKE MSI is not valid\n");
+		return 0;
+	}
+
+	return user_base_data;
+}
+
 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -1227,9 +1766,9 @@
 
 	cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
 
-	if (plat_priv->driver_ops && plat_priv->driver_ops->update_status)
-		plat_priv->driver_ops->update_status(pci_priv->pci_dev,
-						     CNSS_FW_DOWN);
+	if (pci_priv->driver_ops && pci_priv->driver_ops->update_status)
+		pci_priv->driver_ops->update_status(pci_priv->pci_dev,
+						    CNSS_FW_DOWN);
 
 	switch (reason) {
 	case MHI_CB_EE_RDDM:
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 4b2f6bc..79f66ac 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -21,16 +21,6 @@
 
 #include "main.h"
 
-#define QCA6174_VENDOR_ID		0x168C
-#define QCA6174_DEVICE_ID		0x003E
-#define QCA6174_REV_ID_OFFSET		0x08
-#define QCA6174_REV3_VERSION		0x5020000
-#define QCA6174_REV3_2_VERSION		0x5030000
-#define QCA6290_VENDOR_ID		0x17CB
-#define QCA6290_DEVICE_ID		0x1100
-#define QCA6290_EMULATION_VENDOR_ID	0x168C
-#define QCA6290_EMULATION_DEVICE_ID	0xABCD
-
 enum cnss_mhi_state {
 	CNSS_MHI_INIT,
 	CNSS_MHI_DEINIT,
@@ -61,6 +51,7 @@
 	const struct pci_device_id *pci_device_id;
 	u32 device_id;
 	u16 revision_id;
+	struct cnss_wlan_driver *driver_ops;
 	bool pci_link_state;
 	bool pci_link_down_ind;
 	struct pci_saved_state *saved_state;
@@ -130,8 +121,6 @@
 void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
-int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
-			  phys_addr_t *pa);
 int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 			   enum cnss_mhi_state state);
 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
@@ -139,5 +128,18 @@
 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic);
 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
 int cnss_pm_request_resume(struct cnss_pci_data *pci_priv);
+u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv);
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv);
+void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv);
+int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv);
+int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv, void *data);
+int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
+				      int modem_current_status);
 
 #endif /* _CNSS_PCI_H */
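
With driver_ops now stored on cnss_pci_data, every PCI call site checks both the table and the individual hook before calling it, as in the update_status change above. A small sketch of that guarded callback-table idiom (hypothetical types, not the cnss API):

#include <stdio.h>

struct device;				/* opaque */

/* callback table registered by the WLAN driver; any hook may be NULL */
struct wlan_driver_ops {
	int (*probe)(struct device *dev);
	void (*update_status)(struct device *dev, int status);
};

struct pci_data {
	struct device *dev;
	struct wlan_driver_ops *ops;
};

/* call a hook only if the table and the hook are both present */
static void notify_fw_down(struct pci_data *pci)
{
	if (pci->ops && pci->ops->update_status)
		pci->ops->update_status(pci->dev, /* FW_DOWN */ 1);
}

int main(void)
{
	struct pci_data pci = { 0 };

	notify_fw_down(&pci);		/* safe: no ops registered yet */
	return 0;
}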
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index b8777c1..222a131 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -15,8 +15,9 @@
 #include <linux/qmi_encdec.h>
 #include <soc/qcom/msm_qmi_interface.h>
 
-#include "main.h"
+#include "bus.h"
 #include "debug.h"
+#include "main.h"
 #include "qmi.h"
 
 #define WLFW_SERVICE_INS_ID_V01		1
@@ -163,7 +164,7 @@
 	req.num_clients = daemon_support ? 2 : 1;
 	cnss_pr_dbg("Number of clients is %d\n", req.num_clients);
 
-	req.wake_msi = cnss_get_wake_msi(plat_priv);
+	req.wake_msi = cnss_bus_get_wake_irq(plat_priv);
 	if (req.wake_msi) {
 		cnss_pr_dbg("WAKE MSI base data is %d\n", req.wake_msi);
 		req.wake_msi_valid = 1;
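
On the QMI side the host-capability request only advertises a wake MSI when the bus layer actually reports one: the value is copied and the matching *_valid flag is set only if it is nonzero. A toy version of that optional-field pattern (field names are illustrative):

#include <stdio.h>

/* request with an optional field and its validity flag, in the style
 * of QMI messages
 */
struct host_cap_req {
	unsigned int wake_msi;
	int wake_msi_valid;
};

/* hypothetical stand-in for cnss_bus_get_wake_irq() */
static unsigned int get_wake_irq(void)
{
	return 42;			/* 0 would mean "no wake MSI" */
}

int main(void)
{
	struct host_cap_req req = { 0 };

	req.wake_msi = get_wake_irq();
	if (req.wake_msi)		/* advertise only when valid */
		req.wake_msi_valid = 1;

	printf("wake_msi=%u valid=%d\n", req.wake_msi, req.wake_msi_valid);
	return 0;
}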
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index f1231c0..0bffade 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2585,6 +2585,10 @@
 
 		/* enable beacon filtering */
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     false);
+
 		ret = 0;
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
 		   new_state == IEEE80211_STA_ASSOC) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 0aea476..f251c2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2709,7 +2709,8 @@
 				struct ieee80211_sta *sta,
 				struct iwl_lq_sta *lq_sta,
 				enum nl80211_band band,
-				struct rs_rate *rate)
+				struct rs_rate *rate,
+				bool init)
 {
 	int i, nentries;
 	unsigned long active_rate;
@@ -2763,14 +2764,25 @@
 	 */
 	if (sta->vht_cap.vht_supported &&
 	    best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
-		switch (sta->bandwidth) {
-		case IEEE80211_STA_RX_BW_160:
-		case IEEE80211_STA_RX_BW_80:
-		case IEEE80211_STA_RX_BW_40:
+		/*
+		 * In AP mode, when a new station associates, rs is initialized
+		 * immediately upon association completion, before the phy
+		 * context is updated with the association parameters, so the
+		 * sta bandwidth might be wider than the phy context allows.
+		 * To avoid this issue, always initialize rs with 20 MHz
+		 * bandwidth rate, and after authorization, when the phy context
+		 * is already up-to-date, re-init rs with the correct bw.
+		 */
+		u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
+
+		switch (bw) {
+		case RATE_MCS_CHAN_WIDTH_40:
+		case RATE_MCS_CHAN_WIDTH_80:
+		case RATE_MCS_CHAN_WIDTH_160:
 			initial_rates = rs_optimal_rates_vht;
 			nentries = ARRAY_SIZE(rs_optimal_rates_vht);
 			break;
-		case IEEE80211_STA_RX_BW_20:
+		case RATE_MCS_CHAN_WIDTH_20:
 			initial_rates = rs_optimal_rates_vht_20mhz;
 			nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
 			break;
@@ -2781,7 +2793,7 @@
 
 		active_rate = lq_sta->active_siso_rate;
 		rate->type = LQ_VHT_SISO;
-		rate->bw = rs_bw_from_sta_bw(sta);
+		rate->bw = bw;
 	} else if (sta->ht_cap.ht_supported &&
 		   best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
 		initial_rates = rs_optimal_rates_ht;
@@ -2863,7 +2875,7 @@
 	tbl = &(lq_sta->lq_info[active_tbl]);
 	rate = &tbl->rate;
 
-	rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
+	rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
 	rs_init_optimal_rate(mvm, sta, lq_sta);
 
 	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
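
The rs.c change keys the starting VHT rate table off a width that depends on the init phase: 20 MHz at association time, the station's negotiated width on the re-init that mac80211.c now issues after authorization. A toy version of that selection (made-up enum values, not the iwlwifi rate flags):

#include <stdbool.h>
#include <stdio.h>

enum chan_width { WIDTH_20, WIDTH_40, WIDTH_80, WIDTH_160 };

/* hypothetical stand-in for rs_bw_from_sta_bw() */
static enum chan_width sta_bw(void)
{
	return WIDTH_80;
}

/*
 * The first init runs before the phy context reflects the association
 * parameters, so it must not assume more than 20 MHz; the re-init after
 * authorization may use the station's negotiated width.
 */
static enum chan_width initial_bw(bool init)
{
	return init ? WIDTH_20 : sta_bw();
}

int main(void)
{
	printf("at association: %d, after authorization: %d\n",
	       initial_bw(true), initial_bw(false));
	return 0;
}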
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a481eb4..c2bbc8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -72,6 +72,7 @@
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
 	struct iwl_mvm_key_pn *ptk_pn;
+	int res;
 	u8 tid, keyidx;
 	u8 pn[IEEE80211_CCMP_PN_LEN];
 	u8 *extiv;
@@ -128,12 +129,13 @@
 	pn[4] = extiv[1];
 	pn[5] = extiv[0];
 
-	if (memcmp(pn, ptk_pn->q[queue].pn[tid],
-		   IEEE80211_CCMP_PN_LEN) <= 0)
+	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+	if (res < 0)
+		return -1;
+	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
 		return -1;
 
-	if (!(stats->flag & RX_FLAG_AMSDU_MORE))
-		memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
 	stats->flag |= RX_FLAG_PN_VALIDATED;
 
 	return 0;
@@ -295,28 +297,21 @@
 }
 
 /*
- * returns true if a packet outside BA session is a duplicate and
- * should be dropped
+ * returns true if a packet is a duplicate and should be dropped.
+ * Updates AMSDU PN tracking info
  */
-static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
-				  struct ieee80211_rx_status *rx_status,
-				  struct ieee80211_hdr *hdr,
-				  struct iwl_rx_mpdu_desc *desc)
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+			   struct ieee80211_rx_status *rx_status,
+			   struct ieee80211_hdr *hdr,
+			   struct iwl_rx_mpdu_desc *desc)
 {
 	struct iwl_mvm_sta *mvm_sta;
 	struct iwl_mvm_rxq_dup_data *dup_data;
-	u8 baid, tid, sub_frame_idx;
+	u8 tid, sub_frame_idx;
 
 	if (WARN_ON(IS_ERR_OR_NULL(sta)))
 		return false;
 
-	baid = (le32_to_cpu(desc->reorder_data) &
-		IWL_RX_MPDU_REORDER_BAID_MASK) >>
-		IWL_RX_MPDU_REORDER_BAID_SHIFT;
-
-	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
-		return false;
-
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	dup_data = &mvm_sta->dup_data[queue];
 
@@ -346,6 +341,12 @@
 		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
 		return true;
 
+	/* Allow same PN as the first subframe for following subframes */
+	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
+	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
 	dup_data->last_seq[tid] = hdr->seq_ctrl;
 	dup_data->last_sub_frame[tid] = sub_frame_idx;
 
@@ -882,7 +883,7 @@
 		if (ieee80211_is_data(hdr->frame_control))
 			iwl_mvm_rx_csum(sta, skb, desc);
 
-		if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
+		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
 			kfree_skb(skb);
 			rcu_read_unlock();
 			return;
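
The reworked check above encodes the A-MSDU rule: all subframes of an A-MSDU carry the same PN, so an equal PN is accepted only when iwl_mvm_is_dup() has flagged the frame with RX_FLAG_ALLOW_SAME_PN, while a smaller PN is always rejected as a replay. A minimal self-contained sketch of that acceptance rule (pn_acceptable() is a hypothetical helper, not driver code):

#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative only: accept a PN if it is strictly larger than the last
 * one seen, or equal but explicitly allowed (a later subframe of the
 * same A-MSDU), mirroring the PN check above.
 */
static bool pn_acceptable(const u8 *pn, const u8 *last_pn, size_t len,
			  bool allow_same_pn)
{
	int res = memcmp(pn, last_pn, len);

	if (res < 0)		/* replayed or reordered frame */
		return false;
	if (res == 0)		/* same PN: only valid within one A-MSDU */
		return allow_same_pn;
	return true;		/* strictly increasing PN */
}
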
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 7465d4d..bd7ff56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -406,11 +406,11 @@
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 	u8 *crypto_hdr = skb_frag->data + hdrlen;
+	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
 	u64 pn;
 
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
-	case WLAN_CIPHER_SUITE_CCMP_256:
 		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 		break;
@@ -434,13 +434,16 @@
 		break;
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
+		type = TX_CMD_SEC_GCMP;
+		/* Fall through */
+	case WLAN_CIPHER_SUITE_CCMP_256:
 		/* TODO: Taking the key from the table might introduce a race
 		 * when PTK rekeying is done, having an old packets with a PN
 		 * based on the old key but the message encrypted with a new
 		 * one.
 		 * Need to handle this.
 		 */
-		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
+		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
 		tx_cmd->key[0] = keyconf->hw_key_idx;
 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 		break;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4182c37..95e9641 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3084,8 +3084,10 @@
 	if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
 		u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
 
-		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
+			kfree(hwname);
 			return -EINVAL;
+		}
 		param.regd = hwsim_world_regdom_custom[idx];
 	}
 
@@ -3346,8 +3348,11 @@
 			continue;
 
 		list_del(&data->list);
-		INIT_WORK(&data->destroy_work, destroy_radio);
-		schedule_work(&data->destroy_work);
+		spin_unlock_bh(&hwsim_radio_lock);
+		mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+					 NULL);
+		spin_lock_bh(&hwsim_radio_lock);
+
 	}
 	spin_unlock_bh(&hwsim_radio_lock);
 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1b28786..520050e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -350,6 +350,9 @@
 	unsigned int i = 0;
 	struct netfront_queue *queue = NULL;
 
+	if (!np->queues)
+		return -ENODEV;
+
 	for (i = 0; i < num_queues; ++i) {
 		queue = &np->queues[i];
 		napi_enable(&queue->napi);
@@ -1377,18 +1380,8 @@
 #ifdef CONFIG_SYSFS
 	info->netdev->sysfs_groups[0] = &xennet_dev_group;
 #endif
-	err = register_netdev(info->netdev);
-	if (err) {
-		pr_warn("%s: register_netdev err=%d\n", __func__, err);
-		goto fail;
-	}
 
 	return 0;
-
- fail:
-	xennet_free_netdev(netdev);
-	dev_set_drvdata(&dev->dev, NULL);
-	return err;
 }
 
 static void xennet_end_access(int ref, void *page)
@@ -1757,8 +1750,6 @@
 {
 	unsigned int i;
 
-	rtnl_lock();
-
 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
@@ -1767,8 +1758,6 @@
 		netif_napi_del(&queue->napi);
 	}
 
-	rtnl_unlock();
-
 	kfree(info->queues);
 	info->queues = NULL;
 }
@@ -1784,8 +1773,6 @@
 	if (!info->queues)
 		return -ENOMEM;
 
-	rtnl_lock();
-
 	for (i = 0; i < *num_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
@@ -1794,7 +1781,7 @@
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev,
+			dev_warn(&info->xbdev->dev,
 				 "only created %d queues\n", i);
 			*num_queues = i;
 			break;
@@ -1808,10 +1795,8 @@
 
 	netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
-	rtnl_unlock();
-
 	if (*num_queues == 0) {
-		dev_err(&info->netdev->dev, "no queues\n");
+		dev_err(&info->xbdev->dev, "no queues\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -1853,6 +1838,7 @@
 		goto out;
 	}
 
+	rtnl_lock();
 	if (info->queues)
 		xennet_destroy_queues(info);
 
@@ -1863,6 +1849,7 @@
 		info->queues = NULL;
 		goto out;
 	}
+	rtnl_unlock();
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
@@ -1959,8 +1946,10 @@
 	xenbus_transaction_end(xbt, 1);
  destroy_ring:
 	xennet_disconnect_backend(info);
+	rtnl_lock();
 	xennet_destroy_queues(info);
  out:
+	rtnl_unlock();
 	device_unregister(&dev->dev);
 	return err;
 }
@@ -1996,6 +1985,15 @@
 	netdev_update_features(dev);
 	rtnl_unlock();
 
+	if (dev->reg_state == NETREG_UNINITIALIZED) {
+		err = register_netdev(dev);
+		if (err) {
+			pr_warn("%s: register_netdev err=%d\n", __func__, err);
+			device_unregister(&np->xbdev->dev);
+			return err;
+		}
+	}
+
 	/*
 	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
@@ -2186,10 +2184,14 @@
 
 	xennet_disconnect_backend(info);
 
-	unregister_netdev(info->netdev);
+	if (info->netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(info->netdev);
 
-	if (info->queues)
+	if (info->queues) {
+		rtnl_lock();
 		xennet_destroy_queues(info);
+		rtnl_unlock();
+	}
 	xennet_free_netdev(info->netdev);
 
 	return 0;
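
After this change xennet_create_queues() and xennet_destroy_queues() no longer take the RTNL lock themselves; every caller is expected to hold it, as the updated talk_to_netback() and xennet_remove() paths above do. A hedged sketch of that calling convention (example_reset_queues() is a hypothetical wrapper, not driver code):

/*
 * Illustrative only: the queue helpers above now assume the caller
 * holds the RTNL lock around setup and teardown.
 */
static int example_reset_queues(struct netfront_info *info,
				unsigned int *num_queues)
{
	int err;

	rtnl_lock();
	if (info->queues)
		xennet_destroy_queues(info);
	err = xennet_create_queues(info, num_queues);
	rtnl_unlock();

	return err;
}
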
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 24222a5..da95bd8 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -996,6 +996,9 @@
 	mw_base = nt->mw_vec[mw_num].phys_addr;
 	mw_size = nt->mw_vec[mw_num].phys_size;
 
+	if (max_mw_size && mw_size > max_mw_size)
+		mw_size = max_mw_size;
+
 	tx_size = (unsigned int)mw_size / num_qps_mw;
 	qp_offset = tx_size * (qp_num / mw_count);
 
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index eef1a68..b634b89 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -583,8 +583,10 @@
 			opts->discovery_nqn =
 				!(strcmp(opts->subsysnqn,
 					 NVME_DISC_SUBSYS_NAME));
-			if (opts->discovery_nqn)
+			if (opts->discovery_nqn) {
+				opts->kato = 0;
 				opts->nr_io_queues = 0;
+			}
 			break;
 		case NVMF_OPT_TRADDR:
 			p = match_strdup(args);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8cc856e..642ee00 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1120,7 +1120,7 @@
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
-		return result;
+		goto release_vector;
 
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
@@ -1134,9 +1134,12 @@
 	return result;
 
  release_sq:
+	dev->online_queues--;
 	adapter_delete_sq(dev, qid);
  release_cq:
 	adapter_delete_cq(dev, qid);
+ release_vector:
+	nvmeq->cq_vector = -1;
 	return result;
 }
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c89d68a..3a04492 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -491,9 +491,12 @@
 		goto fail;
 	}
 
-	/* either variant of SGLs is fine, as we don't support metadata */
-	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
-		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+	/*
+	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
+	 * contains an address of a single contiguous physical buffer that is
+	 * byte aligned.
+	 */
+	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 52a297d..0a963b1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -46,10 +46,20 @@
 config OF_PROMTREE
 	bool
 
+config OF_KOBJ
+	bool "Display devicetree in sysfs"
+	def_bool SYSFS
+	help
+	  Some embedded platforms have no need to display the devicetree
+	  nodes and properties in sysfs. Disabling this option will save
+	  a small amount of memory, as well as decrease boot time. By
+	  default this option will be enabled if SYSFS is enabled.
+
 # Hardly any platforms need this.  It is safe to select, but only do so if you
 # need it.
 config OF_DYNAMIC
 	bool "Support for dynamic device trees" if OF_UNITTEST
+	select OF_KOBJ
 	help
 	  On some platforms, the device tree can be manipulated at runtime.
 	  While this option is selected automatically on such platforms, you
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index b2f474a..760b730 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,4 +1,5 @@
 obj-y = base.o device.o platform.o
+obj-$(CONFIG_OF_KOBJ) += kobj.o
 obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
 obj-$(CONFIG_OF_FLATTREE) += fdt.o
 obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 23a6d36..9a6fad6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -98,108 +98,6 @@
 }
 #endif
 
-#ifndef CONFIG_OF_DYNAMIC
-static void of_node_release(struct kobject *kobj)
-{
-	/* Without CONFIG_OF_DYNAMIC, no nodes gets freed */
-}
-#endif /* CONFIG_OF_DYNAMIC */
-
-struct kobj_type of_node_ktype = {
-	.release = of_node_release,
-};
-
-static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
-				struct bin_attribute *bin_attr, char *buf,
-				loff_t offset, size_t count)
-{
-	struct property *pp = container_of(bin_attr, struct property, attr);
-	return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
-}
-
-/* always return newly allocated name, caller must free after use */
-static const char *safe_name(struct kobject *kobj, const char *orig_name)
-{
-	const char *name = orig_name;
-	struct kernfs_node *kn;
-	int i = 0;
-
-	/* don't be a hero. After 16 tries give up */
-	while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
-		sysfs_put(kn);
-		if (name != orig_name)
-			kfree(name);
-		name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
-	}
-
-	if (name == orig_name) {
-		name = kstrdup(orig_name, GFP_KERNEL);
-	} else {
-		pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
-			kobject_name(kobj), name);
-	}
-	return name;
-}
-
-int __of_add_property_sysfs(struct device_node *np, struct property *pp)
-{
-	int rc;
-
-	/* Important: Don't leak passwords */
-	bool secure = strncmp(pp->name, "security-", 9) == 0;
-
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return 0;
-
-	if (!of_kset || !of_node_is_attached(np))
-		return 0;
-
-	sysfs_bin_attr_init(&pp->attr);
-	pp->attr.attr.name = safe_name(&np->kobj, pp->name);
-	pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
-	pp->attr.size = secure ? 0 : pp->length;
-	pp->attr.read = of_node_property_read;
-
-	rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
-	WARN(rc, "error adding attribute %s to node %s\n", pp->name, np->full_name);
-	return rc;
-}
-
-int __of_attach_node_sysfs(struct device_node *np)
-{
-	const char *name;
-	struct kobject *parent;
-	struct property *pp;
-	int rc;
-
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return 0;
-
-	if (!of_kset)
-		return 0;
-
-	np->kobj.kset = of_kset;
-	if (!np->parent) {
-		/* Nodes without parents are new top level trees */
-		name = safe_name(&of_kset->kobj, "base");
-		parent = NULL;
-	} else {
-		name = safe_name(&np->parent->kobj, kbasename(np->full_name));
-		parent = &np->parent->kobj;
-	}
-	if (!name)
-		return -ENOMEM;
-	rc = kobject_add(&np->kobj, parent, "%s", name);
-	kfree(name);
-	if (rc)
-		return rc;
-
-	for_each_property_of_node(np, pp)
-		__of_add_property_sysfs(np, pp);
-
-	return 0;
-}
-
 static struct device_node **phandle_cache;
 static u32 phandle_cache_mask;
 
@@ -2021,22 +1919,6 @@
 	return 0;
 }
 
-void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
-{
-	sysfs_remove_bin_file(&np->kobj, &prop->attr);
-	kfree(prop->attr.attr.name);
-}
-
-void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
-{
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return;
-
-	/* at early boot, bail here and defer setup to of_init() */
-	if (of_kset && of_node_is_attached(np))
-		__of_sysfs_remove_bin_file(np, prop);
-}
-
 /**
  * of_remove_property - Remove a property from a node.
  *
@@ -2096,21 +1978,6 @@
 	return 0;
 }
 
-void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
-		struct property *oldprop)
-{
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return;
-
-	/* At early boot, bail out and defer setup to of_init() */
-	if (!of_kset)
-		return;
-
-	if (oldprop)
-		__of_sysfs_remove_bin_file(np, oldprop);
-	__of_add_property_sysfs(np, newprop);
-}
-
 /*
  * of_update_property - Update a property in a node, if the property does
  * not exist, add it.
@@ -2218,7 +2085,7 @@
 			continue;
 
 		/* Allocate an alias_prop with enough space for the stem */
-		ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
 		if (!ap)
 			continue;
 		memset(ap, 0, sizeof(*ap) + len + 1);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 888fdbc..765ba6e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -16,6 +16,11 @@
 
 #include "of_private.h"
 
+static struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+	return container_of(kobj, struct device_node, kobj);
+}
+
 /**
  * of_node_get() - Increment refcount of a node
  * @node:	Node to inc refcount, NULL is supported to simplify writing of
@@ -43,28 +48,6 @@
 }
 EXPORT_SYMBOL(of_node_put);
 
-void __of_detach_node_sysfs(struct device_node *np)
-{
-	struct property *pp;
-
-	if (!IS_ENABLED(CONFIG_SYSFS))
-		return;
-
-	BUG_ON(!of_node_is_initialized(np));
-	if (!of_kset)
-		return;
-
-	/* only remove properties if on sysfs */
-	if (of_node_is_attached(np)) {
-		for_each_property_of_node(np, pp)
-			__of_sysfs_remove_bin_file(np, pp);
-		kobject_del(&np->kobj);
-	}
-
-	/* finally remove the kobj_init ref */
-	of_node_put(np);
-}
-
 static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
 
 int of_reconfig_notifier_register(struct notifier_block *nb)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 755b386..744f625 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -937,7 +937,7 @@
 	int offset;
 	const char *p, *q, *options = NULL;
 	int l;
-	const struct earlycon_id *match;
+	const struct earlycon_id **p_match;
 	const void *fdt = initial_boot_params;
 
 	offset = fdt_path_offset(fdt, "/chosen");
@@ -964,7 +964,10 @@
 		return 0;
 	}
 
-	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+	     p_match++) {
+		const struct earlycon_id *match = *p_match;
+
 		if (!match->compatible[0])
 			continue;
 
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
new file mode 100644
index 0000000..662f79e
--- /dev/null
+++ b/drivers/of/kobj.c
@@ -0,0 +1,165 @@
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "of_private.h"
+
+/* true when node is initialized */
+static int of_node_is_initialized(struct device_node *node)
+{
+	return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+int of_node_is_attached(struct device_node *node)
+{
+	return node && node->kobj.state_in_sysfs;
+}
+
+
+#ifndef CONFIG_OF_DYNAMIC
+static void of_node_release(struct kobject *kobj)
+{
+	/* Without CONFIG_OF_DYNAMIC, no nodes get freed */
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+struct kobj_type of_node_ktype = {
+	.release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *bin_attr, char *buf,
+				loff_t offset, size_t count)
+{
+	struct property *pp = container_of(bin_attr, struct property, attr);
+	return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+/* always return newly allocated name, caller must free after use */
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+	const char *name = orig_name;
+	struct kernfs_node *kn;
+	int i = 0;
+
+	/* don't be a hero. After 16 tries give up */
+	while (i < 16 && name && (kn = sysfs_get_dirent(kobj->sd, name))) {
+		sysfs_put(kn);
+		if (name != orig_name)
+			kfree(name);
+		name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+	}
+
+	if (name == orig_name) {
+		name = kstrdup(orig_name, GFP_KERNEL);
+	} else {
+		pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
+			kobject_name(kobj), name);
+	}
+	return name;
+}
+
+int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+	int rc;
+
+	/* Important: Don't leak passwords */
+	bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+	if (!IS_ENABLED(CONFIG_SYSFS))
+		return 0;
+
+	if (!of_kset || !of_node_is_attached(np))
+		return 0;
+
+	sysfs_bin_attr_init(&pp->attr);
+	pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+	pp->attr.attr.mode = secure ? 0400 : 0444;
+	pp->attr.size = secure ? 0 : pp->length;
+	pp->attr.read = of_node_property_read;
+
+	rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+	WARN(rc, "error adding attribute %s to node %s\n", pp->name,
+		np->full_name);
+	return rc;
+}
+
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+	if (!IS_ENABLED(CONFIG_SYSFS))
+		return;
+
+	sysfs_remove_bin_file(&np->kobj, &prop->attr);
+	kfree(prop->attr.attr.name);
+}
+
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+{
+	/* at early boot, bail here and defer setup to of_init() */
+	if (of_kset && of_node_is_attached(np))
+		__of_sysfs_remove_bin_file(np, prop);
+}
+
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+		struct property *oldprop)
+{
+	/* At early boot, bail out and defer setup to of_init() */
+	if (!of_kset)
+		return;
+
+	if (oldprop)
+		__of_sysfs_remove_bin_file(np, oldprop);
+	__of_add_property_sysfs(np, newprop);
+}
+
+int __of_attach_node_sysfs(struct device_node *np)
+{
+	const char *name;
+	struct kobject *parent;
+	struct property *pp;
+	int rc;
+
+	if (!of_kset)
+		return 0;
+
+	np->kobj.kset = of_kset;
+	if (!np->parent) {
+		/* Nodes without parents are new top level trees */
+		name = safe_name(&of_kset->kobj, "base");
+		parent = NULL;
+	} else {
+		name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+		parent = &np->parent->kobj;
+	}
+	if (!name)
+		return -ENOMEM;
+	rc = kobject_add(&np->kobj, parent, "%s", name);
+	kfree(name);
+	if (rc)
+		return rc;
+
+	for_each_property_of_node(np, pp)
+		__of_add_property_sysfs(np, pp);
+
+	return 0;
+}
+
+void __of_detach_node_sysfs(struct device_node *np)
+{
+	struct property *pp;
+
+	BUG_ON(!of_node_is_initialized(np));
+	if (!of_kset)
+		return;
+
+	/* only remove properties if on sysfs */
+	if (of_node_is_attached(np)) {
+		for_each_property_of_node(np, pp)
+			__of_sysfs_remove_bin_file(np, pp);
+		kobject_del(&np->kobj);
+	}
+
+	/* finally remove the kobj_init ref */
+	of_node_put(np);
+}
+
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index c4d7fdc..eb811185 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -35,12 +35,6 @@
 extern struct list_head aliases_lookup;
 extern struct kset *of_kset;
 
-
-static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
-{
-	return container_of(kobj, struct device_node, kobj);
-}
-
 #if defined(CONFIG_OF_DYNAMIC)
 extern int of_property_notify(int action, struct device_node *np,
 			      struct property *prop, struct property *old_prop);
@@ -55,6 +49,29 @@
 }
 #endif /* CONFIG_OF_DYNAMIC */
 
+#if defined(CONFIG_OF_KOBJ)
+int of_node_is_attached(struct device_node *node);
+int __of_add_property_sysfs(struct device_node *np, struct property *pp);
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+		struct property *oldprop);
+int __of_attach_node_sysfs(struct device_node *np);
+void __of_detach_node_sysfs(struct device_node *np);
+#else
+static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+	return 0;
+}
+static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {}
+static inline void __of_update_property_sysfs(struct device_node *np,
+		struct property *newprop, struct property *oldprop) {}
+static inline int __of_attach_node_sysfs(struct device_node *np)
+{
+	return 0;
+}
+static inline void __of_detach_node_sysfs(struct device_node *np) {}
+#endif
+
 /**
  * General utilities for working with live trees.
  *
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 1cced1d..7e93858 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1367,9 +1367,27 @@
 		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
 	}
 
-	/* Set HF mode as the default (vs. -1 mode). */
+
+	/*
+	 * Hard Fail vs. Soft Fail on PCI "Master Abort".
+	 *
+	 * "Master Abort" means the MMIO transaction timed out - usually due to
+	 * the device not responding to an MMIO read. We would like HF to be
+	 * enabled to find driver problems, though it means the system will
+	 * crash with an HPMC.
+	 *
+	 * In Soft Fail mode "~0L" is returned as a result of a timeout on the
+	 * PCI bus. This matches how PCI buses on x86 and most other
+	 * architectures behave.  In order to increase compatibility with
+	 * existing (x86) PCI hardware and existing Linux drivers we enable
+	 * Soft Fail mode on PA-RISC now too.
+	 */
         stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
+#if defined(ENABLE_HARDFAIL)
 	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
+#else
+	WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
+#endif
 
 	/*
 	** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
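
The practical effect described in the comment above: with Soft Fail enabled a timed-out MMIO read completes with all ones instead of crashing the machine with an HPMC, so the common "all ones means the device is gone" driver check now works on PA-RISC as well. A generic sketch of that pattern (example_check_device_present() and the register choice are hypothetical):

#include <linux/io.h>
#include <linux/errno.h>

/*
 * Illustrative only: in Soft Fail mode a read from a device that does
 * not respond returns all ones, so a driver can detect a missing or
 * dead device instead of taking an HPMC.
 */
static int example_check_device_present(void __iomem *regs)
{
	u32 val = readl(regs);	/* any register known never to read ~0 */

	if (val == ~0U)		/* master abort: nothing answered */
		return -ENODEV;

	return 0;
}
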
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 4fce494..11bad82 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -32,6 +32,7 @@
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT	5
 #define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE		(0 << 11)
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT	12
+#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ		0x2
 #define PCIE_CORE_LINK_CTRL_STAT_REG				0xd0
 #define     PCIE_CORE_LINK_L0S_ENTRY				BIT(0)
 #define     PCIE_CORE_LINK_TRAINING				BIT(5)
@@ -175,8 +176,6 @@
 #define PCIE_CONFIG_WR_TYPE0			0xa
 #define PCIE_CONFIG_WR_TYPE1			0xb
 
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev)				(dev << 4)
 #define PCIE_CONF_BUS(bus)			(((bus) & 0xff) << 20)
 #define PCIE_CONF_DEV(dev)			(((dev) & 0x1f) << 15)
 #define PCIE_CONF_FUNC(fun)			(((fun) & 0x7)	<< 12)
@@ -298,7 +297,8 @@
 	reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
 		(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
 		PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
-		PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+		(PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+		 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
 	advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
 
 	/* Program PCIe Control 2 to disable strict ordering */
@@ -439,7 +439,7 @@
 	u32 reg;
 	int ret;
 
-	if (PCI_SLOT(devfn) != 0) {
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
@@ -458,7 +458,7 @@
 	advk_writel(pcie, reg, PIO_CTRL);
 
 	/* Program the address registers */
-	reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
 	advk_writel(pcie, reg, PIO_ADDR_LS);
 	advk_writel(pcie, 0, PIO_ADDR_MS);
 
@@ -493,7 +493,7 @@
 	int offset;
 	int ret;
 
-	if (PCI_SLOT(devfn) != 0)
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	if (where % size)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d81ad84..f11c382 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1147,11 +1147,14 @@
 	int error;
 
 	/*
-	 * If pci_dev->driver is not set (unbound), the device should
-	 * always remain in D0 regardless of the runtime PM status
+	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
+	 * but it may go to D3cold when the bridge above it runtime suspends.
+	 * Save its config space in case that happens.
 	 */
-	if (!pci_dev->driver)
+	if (!pci_dev->driver) {
+		pci_save_state(pci_dev);
 		return 0;
+	}
 
 	if (!pm || !pm->runtime_suspend)
 		return -ENOSYS;
@@ -1199,16 +1202,18 @@
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	/*
-	 * If pci_dev->driver is not set (unbound), the device should
-	 * always remain in D0 regardless of the runtime PM status
+	 * Restoring config space is necessary even if the device is not bound
+	 * to a driver because although we left it in D0, it may have gone to
+	 * D3cold when the bridge above it runtime suspended.
 	 */
+	pci_restore_standard_config(pci_dev);
+
 	if (!pci_dev->driver)
 		return 0;
 
 	if (!pm || !pm->runtime_resume)
 		return -ENOSYS;
 
-	pci_restore_standard_config(pci_dev);
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 	__pci_enable_wake(pci_dev, PCI_D0, true, false);
 	pci_fixup_device(pci_fixup_resume, pci_dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a87c8e1..9c13aee 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3756,27 +3756,49 @@
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
-/*
- * We should only need to wait 100ms after FLR, but some devices take longer.
- * Wait for up to 1000ms for config space to return something other than -1.
- * Intel IGD requires this when an LCD panel is attached.  We read the 2nd
- * dword because VFs don't implement the 1st dword.
- */
 static void pci_flr_wait(struct pci_dev *dev)
 {
-	int i = 0;
+	int delay = 1, timeout = 60000;
 	u32 id;
 
-	do {
-		msleep(100);
-		pci_read_config_dword(dev, PCI_COMMAND, &id);
-	} while (i++ < 10 && id == ~0);
+	/*
+	 * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
+	 * 100ms, but may silently discard requests while the FLR is in
+	 * progress.  Wait 100ms before trying to access the device.
+	 */
+	msleep(100);
 
-	if (id == ~0)
-		dev_warn(&dev->dev, "Failed to return from FLR\n");
-	else if (i > 1)
-		dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
-			 (i - 1) * 100);
+	/*
+	 * After 100ms, the device should not silently discard config
+	 * requests, but it may still indicate that it needs more time by
+	 * responding to them with CRS completions.  The Root Port will
+	 * generally synthesize ~0 data to complete the read (except when
+	 * CRS SV is enabled and the read was for the Vendor ID; in that
+	 * case it synthesizes 0x0001 data).
+	 *
+	 * Wait for the device to return a non-CRS completion.  Read the
+	 * Command register instead of Vendor ID so we don't have to
+	 * contend with the CRS SV value.
+	 */
+	pci_read_config_dword(dev, PCI_COMMAND, &id);
+	while (id == ~0) {
+		if (delay > timeout) {
+			dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
+				 100 + delay - 1);
+			return;
+		}
+
+		if (delay > 1000)
+			dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
+				 100 + delay - 1);
+
+		msleep(delay);
+		delay *= 2;
+		pci_read_config_dword(dev, PCI_COMMAND, &id);
+	}
+
+	if (delay > 1000)
+		dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
 }
 
 static int pcie_flr(struct pci_dev *dev, int probe)
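
A note on the timing arithmetic above, not stated in the patch itself: the poll delay doubles each iteration starting at 1 ms, so when the current delay is d the loop has already slept a total of d - 1 ms, which is why the messages report "100 + delay - 1" ms. The same bounded exponential backoff shown in isolation (example_poll_with_backoff() is a hypothetical helper):

#include <linux/delay.h>
#include <linux/types.h>

/*
 * Illustrative only: poll ready() with 1, 2, 4, ... ms sleeps until it
 * succeeds or the next delay would exceed timeout_ms; by the time the
 * current delay is `delay`, a total of (delay - 1) ms has been slept.
 */
static bool example_poll_with_backoff(bool (*ready)(void *data), void *data,
				      int timeout_ms)
{
	int delay = 1;

	while (!ready(data)) {
		if (delay > timeout_ms)
			return false;	/* give up, as the warning above does */
		msleep(delay);
		delay *= 2;
	}

	return true;
}
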
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fb177dc..b55f9179 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3857,6 +3857,8 @@
 			 quirk_dma_func1_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
 			 quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
+			 quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
 			 quirk_dma_func1_alias);
@@ -3872,6 +3874,9 @@
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
 			 quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
+			 quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
 			 quirk_dma_func1_alias);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df02f98..37df1bf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1128,6 +1128,7 @@
 	armpmu_init(pmu);
 
 	pmu->plat_device = pdev;
+	platform_set_drvdata(pdev, pmu);
 
 	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
 		init_fn = of_id->data;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index df63b7d..b40a074 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -368,18 +368,6 @@
 	writel(value, padcfg0);
 }
 
-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
-{
-	u32 value;
-
-	/* Put the pad into GPIO mode */
-	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
-	/* Disable SCI/SMI/NMI generation */
-	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
-	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-	writel(value, padcfg0);
-}
-
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
 				     struct pinctrl_gpio_range *range,
 				     unsigned pin)
@@ -387,6 +375,7 @@
 	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 	void __iomem *padcfg0;
 	unsigned long flags;
+	u32 value;
 
 	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -396,7 +385,13 @@
 	}
 
 	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-	intel_gpio_set_gpio_mode(padcfg0);
+	/* Put the pad into GPIO mode */
+	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+	/* Disable SCI/SMI/NMI generation */
+	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+	writel(value, padcfg0);
+
 	/* Disable TX buffer and enable RX (this will be input) */
 	__intel_gpio_set_direction(padcfg0, true);
 
@@ -775,8 +770,6 @@
 
 	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-	intel_gpio_set_gpio_mode(reg);
-
 	value = readl(reg);
 
 	value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 0991a99..bdce49b 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -529,7 +529,7 @@
 			pad->pullup = arg;
 			break;
 		case PMIC_GPIO_CONF_STRENGTH:
-			if (arg > PMIC_GPIO_STRENGTH_LOW)
+			if (arg > PMIC_GPIO_STRENGTH_HIGH)
 				return -EINVAL;
 			pad->strength = arg;
 			break;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index d3f5501d..a562ed7 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -365,7 +365,7 @@
 			banks |= BIT(0);
 			break;
 		case PM8XXX_QCOM_DRIVE_STRENGH:
-			if (arg > PMIC_GPIO_STRENGTH_LOW) {
+			if (arg > PM8921_GPIO_STRENGTH_LOW) {
 				dev_err(pctrl->dev, "invalid drive strength\n");
 				return -EINVAL;
 			}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index dc9b671..2971888 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,7 +1,7 @@
 /*
  * R8A7796 processor support - PFC hardware block.
  *
- * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016-2017 Renesas Electronics Corp.
  *
  * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
  *
@@ -476,7 +476,7 @@
 #define MOD_SEL1_26		FM(SEL_TIMER_TMU_0)	FM(SEL_TIMER_TMU_1)
 #define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
 #define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
-#define MOD_SEL1_20		FM(SEL_SSI_0)		FM(SEL_SSI_1)
+#define MOD_SEL1_20		FM(SEL_SSI1_0)		FM(SEL_SSI1_1)
 #define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
 #define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
 #define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
@@ -1208,7 +1208,7 @@
 	PINMUX_IPSR_GPSR(IP13_11_8,	HSCK0),
 	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP13_11_8,	AUDIO_CLKB_A,		SEL_ADG_B_0),
-	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI1_1),
 	PINMUX_IPSR_MSEL(IP13_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
@@ -1216,14 +1216,14 @@
 
 	PINMUX_IPSR_GPSR(IP13_15_12,	HRX0),
 	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI2_1),
 	PINMUX_IPSR_MSEL(IP13_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
 
 	PINMUX_IPSR_GPSR(IP13_19_16,	HTX0),
 	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP13_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
@@ -1231,7 +1231,7 @@
 	PINMUX_IPSR_GPSR(IP13_23_20,	HCTS0_N),
 	PINMUX_IPSR_MSEL(IP13_23_20,	RX2_B,			SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP13_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
@@ -1240,7 +1240,7 @@
 	PINMUX_IPSR_GPSR(IP13_27_24,	HRTS0_N),
 	PINMUX_IPSR_MSEL(IP13_27_24,	TX2_B,			SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
-	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP13_27_24,	BPFCLK_A,		SEL_FM_0),
 	PINMUX_IPSR_GPSR(IP13_27_24,	AUDIO_CLKOUT2_A),
@@ -1255,7 +1255,7 @@
 	PINMUX_IPSR_MSEL(IP14_3_0,	RX5_A,			SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP14_3_0,	NFWP_N_A,		SEL_NDF_0),
 	PINMUX_IPSR_MSEL(IP14_3_0,	AUDIO_CLKA_C,		SEL_ADG_A_2),
-	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP14_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
 	PINMUX_IPSR_GPSR(IP14_3_0,	AUDIO_CLKOUT3_A),
 	PINMUX_IPSR_MSEL(IP14_3_0,	TCLK1_B,		SEL_TIMER_TMU_1),
@@ -1264,7 +1264,7 @@
 	PINMUX_IPSR_MSEL(IP14_7_4,	TX5_A,			SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP14_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP14_7_4,	AUDIO_CLKC_A,		SEL_ADG_C_0),
-	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP14_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_GPSR(IP14_7_4,	AUDIO_CLKOUT_D),
 	PINMUX_IPSR_MSEL(IP14_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
@@ -1292,10 +1292,10 @@
 	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
 
 	/* IPSR15 */
-	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI1_0),
 
-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI_0),
-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI2_0),
+	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI1_1),
 
 	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SCK34),
 	PINMUX_IPSR_MSEL(IP15_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
@@ -1381,11 +1381,11 @@
 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
 
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK2_B,		SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK1_A,		SEL_HSCIF1_0),
-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI1_1),
 	PINMUX_IPSR_GPSR(IP16_31_28,	SCK1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
 	PINMUX_IPSR_GPSR(IP16_31_28,	SCK5_A),
@@ -1417,7 +1417,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_19_16,	USB1_PWEN),
 	PINMUX_IPSR_MSEL(IP17_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
-	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP17_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP17_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP17_19_16,	FMCLK_B,		SEL_FM_1),
@@ -1427,7 +1427,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_23_20,	USB1_OVC),
 	PINMUX_IPSR_MSEL(IP17_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
-	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI_0),
+	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP17_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP17_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP17_23_20,	FMIN_B,			SEL_FM_1),
@@ -1437,7 +1437,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_27_24,	USB30_PWEN),
 	PINMUX_IPSR_GPSR(IP17_27_24,	AUDIO_CLKOUT_B),
-	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI2_1),
 	PINMUX_IPSR_MSEL(IP17_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
 	PINMUX_IPSR_MSEL(IP17_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP17_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
@@ -1449,7 +1449,7 @@
 
 	PINMUX_IPSR_GPSR(IP17_31_28,	USB30_OVC),
 	PINMUX_IPSR_GPSR(IP17_31_28,	AUDIO_CLKOUT1_B),
-	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI2_1),
 	PINMUX_IPSR_MSEL(IP17_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
 	PINMUX_IPSR_MSEL(IP17_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
 	PINMUX_IPSR_MSEL(IP17_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
@@ -1460,7 +1460,7 @@
 	/* IPSR18 */
 	PINMUX_IPSR_GPSR(IP18_3_0,	GP6_30),
 	PINMUX_IPSR_GPSR(IP18_3_0,	AUDIO_CLKOUT2_B),
-	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP18_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP18_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP18_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
@@ -1471,7 +1471,7 @@
 
 	PINMUX_IPSR_GPSR(IP18_7_4,	GP6_31),
 	PINMUX_IPSR_GPSR(IP18_7_4,	AUDIO_CLKOUT3_B),
-	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI_1),
+	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP18_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP18_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP18_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
index 33e0314..36d49e4 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
@@ -359,7 +359,7 @@
 	ulong                        global_irq_counter;
 
 	bool                         dump_conf;
-
+	bool                         config_mmio_init;
 	bool                         enumerated;
 	enum ep_pcie_link_status     link_status;
 	bool                         perst_deast;
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 4558530..0ada0bf 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -503,6 +503,13 @@
 		"Initial version of MMIO is:0x%x\n",
 		readl_relaxed(dev->mmio + PCIE20_MHIVER));
 
+	if (dev->config_mmio_init) {
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: MMIO already initialized, return\n",
+				dev->rev);
+		return;
+	}
+
 	ep_pcie_write_reg(dev->mmio, PCIE20_MHICFG, 0x02800880);
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_EXECENV, 0x2);
 	ep_pcie_write_reg(dev->mmio, PCIE20_MHICTRL, 0x0);
@@ -511,6 +518,8 @@
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_VERSION_LOWER, 0x2);
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_VERSION_UPPER, 0x1);
 	ep_pcie_write_reg(dev->mmio, PCIE20_BHI_INTVEC, 0xffffffff);
+
+	dev->config_mmio_init = true;
 }
 
 static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured)
diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile
index b350a59..1eed995 100644
--- a/drivers/platform/msm/gsi/Makefile
+++ b/drivers/platform/msm/gsi/Makefile
@@ -1 +1,8 @@
 obj-$(CONFIG_GSI) += gsi.o gsi_dbg.o
+
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+obj-$(CONFIG_GSI) += gsi_emulation.o
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index e729c56..2c29538 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include "gsi.h"
 #include "gsi_reg.h"
+#include "gsi_emulation.h"
 
 #define GSI_CMD_TIMEOUT (5*HZ)
 #define GSI_STOP_CMD_TIMEOUT_MS 20
@@ -33,6 +34,8 @@
 	{ },
 };
 
+static bool running_emulation = IPA_EMULATION_COMPILE;
+
 struct gsi_ctx *gsi_ctx;
 
 static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
@@ -577,7 +580,7 @@
 		if (!type)
 			break;
 
-		GSIDBG_LOW("type %x\n", type);
+		GSIDBG_LOW("type 0x%x\n", type);
 
 		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
 			gsi_handle_ch_ctrl(ee);
@@ -777,17 +780,57 @@
 			GSIERR("bad irq specified %u\n", props->irq);
 			return -GSI_STATUS_INVALID_PARAMS;
 		}
-
-		res = devm_request_irq(gsi_ctx->dev, props->irq,
+		/*
+		 * On a real UE, there are two separate interrupt
+		 * vectors that get directed toward the GSI/IPA
+		 * drivers.  They are handled by gsi_isr() and
+		 * (ipa_isr() or ipa3_isr()) respectively.  In the
+		 * emulation environment, this is not the case;
+		 * instead, interrupt vectors are routed to the
+		 * emualation hardware's interrupt controller, who in
+		 * emulation hardware's interrupt controller, which in
+		 * turn forwards a single interrupt to the GSI/IPA
+		 * driver.  When the new interrupt vector is received,
+		 * the driver needs to probe the interrupt
+		 * controller's registers to see if one, the other, or
+		 * both interrupts have occurred.  Given the above, we
+		 * now need to handle both situations, namely the
+		 * emulator's and the real UE's.
+		if (running_emulation) {
+			/*
+			 * New scheme involving the emulator's
+			 * interrupt controller.
+			 */
+			res = devm_request_threaded_irq(
+				gsi_ctx->dev,
+				props->irq,
+				/* top half handler to follow */
+				emulator_hard_irq_isr,
+				/* threaded bottom half handler to follow */
+				emulator_soft_irq_isr,
+				IRQF_SHARED,
+				"emulator_intcntrlr",
+				gsi_ctx);
+		} else {
+			/*
+			 * Traditional scheme used on the real UE.
+			 */
+			res = devm_request_irq(gsi_ctx->dev, props->irq,
 				(irq_handler_t) gsi_isr,
 				props->req_clk_cb ? IRQF_TRIGGER_RISING :
 					IRQF_TRIGGER_HIGH,
 				"gsi",
 				gsi_ctx);
+		}
 		if (res) {
-			GSIERR("failed to register isr for %u\n", props->irq);
+			GSIERR(
+			 "failed to register isr for %u\n",
+			 props->irq);
 			return -GSI_STATUS_ERROR;
 		}
+		GSIDBG(
+			"succeeded to register isr for %u\n",
+			props->irq);
 
 		res = enable_irq_wake(props->irq);
 		if (res)
@@ -808,6 +851,41 @@
 		return -GSI_STATUS_RES_ALLOC_FAILURE;
 	}
 
+	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%lx)\n",
+	       &(props->phys_addr),
+	       gsi_ctx->base,
+	       props->size);
+
+	if (running_emulation) {
+		GSIDBG("GSI SW ver register value 0x%x\n",
+		       gsi_readl(gsi_ctx->base +
+		       GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
+		gsi_ctx->intcntrlr_mem_size =
+		    props->emulator_intcntrlr_size;
+		gsi_ctx->intcntrlr_base =
+		    devm_ioremap_nocache(
+			gsi_ctx->dev,
+			props->emulator_intcntrlr_addr,
+			props->emulator_intcntrlr_size);
+		if (!gsi_ctx->intcntrlr_base) {
+			GSIERR(
+			  "failed to remap emulator's interrupt controller HW\n");
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+			return -GSI_STATUS_RES_ALLOC_FAILURE;
+		}
+
+		GSIDBG(
+		    "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
+		    &(props->emulator_intcntrlr_addr),
+		    gsi_ctx->intcntrlr_base,
+		    props->emulator_intcntrlr_size);
+
+		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
+		gsi_ctx->intcntrlr_client_isr =
+		    props->emulator_intcntrlr_client_isr;
+	}
+
 	gsi_ctx->per = *props;
 	gsi_ctx->per_registered = true;
 	mutex_init(&gsi_ctx->mlock);
@@ -816,6 +894,9 @@
 	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
 	if (gsi_ctx->max_ch == 0) {
 		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max channels\n");
 		return -GSI_STATUS_ERROR;
@@ -823,6 +904,9 @@
 	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
 	if (gsi_ctx->max_ev == 0) {
 		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max event rings\n");
 		return -GSI_STATUS_ERROR;
@@ -831,7 +915,9 @@
 	if (props->mhi_er_id_limits_valid &&
 	    props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
 		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
-		gsi_ctx->base = NULL;
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("MHI event ring start id %u is beyond max %u\n",
 			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
@@ -872,6 +958,22 @@
 		gsi_writel(0, gsi_ctx->base +
 			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
 
+	if (running_emulation) {
+		/*
+		 * Set up the emulator's interrupt controller...
+		 */
+		res = setup_emulator_cntrlr(
+		    gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
+		if (res != 0) {
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+			GSIERR("setup_emulator_cntrlr() failed\n");
+			return res;
+		}
+	}
+
 	*dev_hdl = (uintptr_t)gsi_ctx;
 
 	return GSI_STATUS_SUCCESS;
@@ -1190,6 +1292,7 @@
 		if (!props->evchid_valid)
 			clear_bit(evt_id, &gsi_ctx->evt_bmap);
 		mutex_unlock(&gsi_ctx->mlock);
+		BUG();
 		return -GSI_STATUS_RES_ALLOC_FAILURE;
 	}
 
@@ -1774,6 +1877,32 @@
 }
 EXPORT_SYMBOL(gsi_alloc_channel);
 
+static void __gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch * val)
+{
+	uint32_t reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word1 = reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word2 = reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word3 = reg;
+
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	val->data.word4 = reg;
+}
+
 static void __gsi_write_channel_scratch(unsigned long chan_hdl,
 		union __packed gsi_channel_scratch val)
 {
@@ -1834,6 +1963,40 @@
 }
 EXPORT_SYMBOL(gsi_write_channel_scratch);
 
+int gsi_read_channel_scratch(unsigned long chan_hdl,
+	union __packed gsi_channel_scratch * ch_scratch)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	__gsi_read_channel_scratch(chan_hdl, ch_scratch);
+	ctx->restore_scratch = *ch_scratch;
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_read_channel_scratch);
+
 int gsi_query_channel_db_addr(unsigned long chan_hdl,
 		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
 {
@@ -2134,6 +2297,10 @@
 		BUG();
 	}
 
+	/* Hardware issue fixed from GSI 2.0 and no need for the WA */
+	if (gsi_ctx->per.ver >= GSI_VER_2_0)
+		reset_done = true;
+
 	/* workaround: reset GSI producers again */
 	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
 		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
@@ -2512,7 +2679,7 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
 		GSIERR("bad state %d\n", ctx->state);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -2621,21 +2788,27 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	spin_lock_irqsave(&gsi_ctx->slock, flags);
 	if (curr == GSI_CHAN_MODE_CALLBACK &&
 			mode == GSI_CHAN_MODE_POLL) {
+		spin_lock_irqsave(&gsi_ctx->slock, flags);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
+		spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+		spin_lock_irqsave(&ctx->ring.slock, flags);
 		atomic_set(&ctx->poll_mode, mode);
+		spin_unlock_irqrestore(&ctx->ring.slock, flags);
 		ctx->stats.callback_to_poll++;
 	}
 
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
+		spin_lock_irqsave(&ctx->ring.slock, flags);
 		atomic_set(&ctx->poll_mode, mode);
+		spin_unlock_irqrestore(&ctx->ring.slock, flags);
+		spin_lock_irqsave(&gsi_ctx->slock, flags);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
+		spin_unlock_irqrestore(&gsi_ctx->slock, flags);
 		ctx->stats.poll_to_callback++;
 	}
-	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
 
 	return GSI_STATUS_SUCCESS;
 }
@@ -2730,24 +2903,47 @@
 {
 	void __iomem *gsi_base = (void __iomem *)base;
 
-	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
-	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
-	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
-	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
-	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
-	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
-	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
-	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
-	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
-	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
-	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
-	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
-	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+	gsi_writel(1,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
+	gsi_writel(2,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
+	gsi_writel(3,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
+	gsi_writel(4,
+		   gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
+	gsi_writel(5,
+		   gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
+	gsi_writel(6,
+		   gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
+	gsi_writel(7,
+		   gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
+	gsi_writel(8,
+		   gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+	gsi_writel(9,
+		   gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+	gsi_writel(10,
+		   gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
+	gsi_writel(11,
+		   gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
+	gsi_writel(12,
+		   gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
+	gsi_writel(13,
+		   gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+
+	if (running_emulation) {
+		gsi_writel(14,
+			   gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
+		gsi_writel(15,
+			   gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
+		gsi_writel(16,
+			   gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);
+	}
 }
 
 static void gsi_configure_bck_prs_matrix(void *base)
 {
 	void __iomem *gsi_base = (void __iomem *)base;
+
 	/*
 	 * For now, these are default values. In the future, GSI FW image will
 	 * produce optimized back-pressure values based on the FW image.
@@ -2970,15 +3166,45 @@
 	},
 };
 
+static struct platform_device *pdev;
+
 /**
  * Module Init.
  */
 static int __init gsi_init(void)
 {
+	int ret;
+
 	pr_debug("gsi_init\n");
-	return platform_driver_register(&msm_gsi_driver);
+
+	ret = platform_driver_register(&msm_gsi_driver);
+	if (ret < 0)
+		goto out;
+
+	if (running_emulation) {
+		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			ret = PTR_ERR(pdev);
+			platform_driver_unregister(&msm_gsi_driver);
+			goto out;
+		}
+	}
+
+out:
+	return ret;
 }
 
+/*
+ * Module exit.
+ */
+static void __exit gsi_exit(void)
+{
+	if (running_emulation && pdev)
+		platform_device_unregister(pdev);
+	platform_driver_unregister(&msm_gsi_driver);
+}
+
+module_exit(gsi_exit);
 arch_initcall(gsi_init);
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 7e10405..4a3f4ec 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,8 +18,16 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/msm_gsi.h>
+#include <linux/errno.h>
 #include <linux/ipc_logging.h>
 
+/*
+ * The following pulls in code (ie. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "gsi_emulation_stubs.h"
+#endif
+
 #define GSI_CHAN_MAX      31
 #define GSI_EVT_RING_MAX  23
 #define GSI_NO_EVT_ERINDEX 31
@@ -129,6 +137,7 @@
 	bool allocated;
 	atomic_t poll_mode;
 	union __packed gsi_channel_scratch scratch;
+	union __packed gsi_channel_scratch restore_scratch;
 	struct gsi_chan_stats stats;
 	bool enable_dp_stats;
 	bool print_dp_stats;
@@ -204,6 +213,13 @@
 	struct completion gen_ee_cmd_compl;
 	void *ipc_logbuf;
 	void *ipc_logbuf_low;
+	/*
+	 * The following are used only on emulation systems.
+	 */
+	void __iomem *intcntrlr_base;
+	u32 intcntrlr_mem_size;
+	irq_handler_t intcntrlr_gsi_isr;
+	irq_handler_t intcntrlr_client_isr;
 };
 
 enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_emulation.c b/drivers/platform/msm/gsi/gsi_emulation.c
new file mode 100644
index 0000000..adaaaaa
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "gsi_emulation.h"
+
+/*
+ * *****************************************************************************
+ * The following used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size)
+{
+	uint32_t val, ver, intrCnt, rangeCnt, range;
+
+	val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT);
+
+	intrCnt  = val & 0xFFFF;
+	ver      = (val >> 16) & 0xFFFF;
+	rangeCnt = intrCnt / 32;
+
+	GSIDBG(
+	    "CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n",
+	    val, intrCnt, ver, rangeCnt);
+
+	/*
+	 * Verify the interrupt controller version
+	 */
+	if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) {
+		GSIERR(
+		  "Error: invalid interrupt controller version 0x%x\n",
+		  ver);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	/*
+	 * Verify the interrupt count
+	 *
+	 * NOTE: intrCnt must be at least one block and a multiple of 32
+	 */
+	if ((intrCnt % 32) != 0) {
+		GSIERR(
+		  "Invalid interrupt count read from HW 0x%04x\n",
+		  intrCnt);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * Calculate number of ranges used, each range handles 32 int lines
+	 */
+	if (rangeCnt > DEO_IC_MAX_RANGE_CNT) {
+		GSIERR(
+		  "SW interrupt limit(%u) exceeded, increase DEO_IC_MAX_RANGE_CNT(%u)\n",
+		  rangeCnt,
+		  DEO_IC_MAX_RANGE_CNT);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * Take the last register offset minus the first register offset
+	 * (ie. the range) and compare it to the interrupt controller's
+	 * dtsi-defined memory size.  The range must fit within that size.
+	 */
+	val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT;
+	if (val > intcntrlr_mem_size) {
+		GSIERR(
+		    "Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n",
+		    val, intcntrlr_mem_size);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * The following will disable the emulator's interrupt controller,
+	 * so that we can configure it...
+	 */
+	GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+	gsi_emu_writel(
+		0x0,
+		intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+	/*
+	 * Init register maps of all ranges
+	 */
+	for (range = 0; range < rangeCnt; range++) {
+		/*
+		 * Disable all int sources by setting all enable clear bits
+		 */
+		GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range);
+		gsi_emu_writel(
+		    0xFFFFFFFF,
+		    intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range));
+
+		/*
+		 * Clear all raw statuses
+		 */
+		GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range);
+		gsi_emu_writel(
+		    0xFFFFFFFF,
+		    intcntrlr_base + GE_INT_CLEAR_n(range));
+
+		/*
+		 * Init all int types
+		 */
+		GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range);
+		gsi_emu_writel(
+		    0x0,
+		    intcntrlr_base + GE_INT_TYPE_n(range));
+	}
+
+	/*
+	 * The following tells the interrupt controller to interrupt us
+	 * when it sees interrupts from ipa and/or gsi.
+	 *
+	 * Interrupts:
+	 * ===================================================================
+	 * DUT0                       [  63 :   16 ]
+	 * ipa_irq                                        [ 3 : 0 ] <---HERE
+	 * ipa_gsi_bam_irq                                [ 7 : 4 ] <---HERE
+	 * ipa_bam_apu_sec_error_irq                      [ 8 ]
+	 * ipa_bam_apu_non_sec_error_irq                  [ 9 ]
+	 * ipa_bam_xpu2_msa_intr                          [ 10 ]
+	 * ipa_vmidmt_nsgcfgirpt                          [ 11 ]
+	 * ipa_vmidmt_nsgirpt                             [ 12 ]
+	 * ipa_vmidmt_gcfgirpt                            [ 13 ]
+	 * ipa_vmidmt_girpt                               [ 14 ]
+	 * bam_xpu3_qad_non_secure_intr_sp                [ 15 ]
+	 */
+	GSIDBG("Writing GE_INT_ENABLE_n(0)\n");
+	gsi_emu_writel(
+	    0x00FF, /* See <---HERE above */
+	    intcntrlr_base + GE_INT_ENABLE_n(0));
+
+	/*
+	 * The following will enable the IC post config...
+	 */
+	GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+	gsi_emu_writel(
+	    0x1,
+	    intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+	uint32_t val;
+
+	val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS);
+
+	/*
+	 * If bit zero is set, the interrupt is for us; hence return
+	 * IRQ_NONE when it's not set...
+	 */
+	if (!(val & 0x00000001))
+		return IRQ_NONE;
+
+	/*
+	 * The following will mask (ie. turn off) future interrupts from
+	 * the emulator's interrupt controller. It will stay this way until
+	 * we turn it back on... which will be done in the bottom half
+	 * (ie. emulator_soft_irq_isr)...
+	 */
+	gsi_emu_writel(
+		0x0,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+	irqreturn_t retVal = IRQ_HANDLED;
+	uint32_t	val;
+
+	val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0));
+
+	GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val);
+
+	if (val & 0xF0 && gsi_ctx_ptr->intcntrlr_gsi_isr) {
+		GSIDBG("Got gsi interrupt\n");
+		retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt);
+	}
+
+	if (val & 0x0F && gsi_ctx_ptr->intcntrlr_client_isr) {
+		GSIDBG("Got ipa interrupt\n");
+		retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, 0);
+	}
+
+	/*
+	 * The following will clear the interrupts...
+	 */
+	gsi_emu_writel(
+		0xFFFFFFFF,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0));
+
+	/*
+	 * The following will unmask (ie. turn on) future interrupts from
+	 * the emulator's interrupt controller...
+	 */
+	gsi_emu_writel(
+		0x1,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+	return retVal;
+}
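The two routines above are designed to be installed as a hard/threaded IRQ pair: emulator_hard_irq_isr() checks GE_INT_MASTER_STATUS, masks the controller through GE_INT_OUT_ENABLE and returns IRQ_WAKE_THREAD, after which emulator_soft_irq_isr() runs in thread context, dispatches to the GSI/IPA handlers, clears GE_INT_CLEAR_n(0) and unmasks the controller again. A minimal registration sketch follows; the IRQ number, trigger flags and name are illustrative assumptions, not part of this patch.

	#include <linux/interrupt.h>

	/* Sketch only: wire the hard and threaded handlers to one emulator IRQ line */
	static int gsi_emu_request_irq_sketch(struct gsi_ctx *ctx, int irq)
	{
		return request_threaded_irq(irq,
					    emulator_hard_irq_isr,  /* hard IRQ context */
					    emulator_soft_irq_isr,  /* kernel thread    */
					    IRQF_TRIGGER_HIGH,      /* assumed trigger  */
					    "gsi-emu-intcntrlr",    /* assumed name     */
					    ctx);
	}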
diff --git a/drivers/platform/msm/gsi/gsi_emulation.h b/drivers/platform/msm/gsi/gsi_emulation.h
new file mode 100644
index 0000000..246f9a8
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_GSI_EMULATION_H_)
+# define _GSI_EMULATION_H_
+
+# include <linux/interrupt.h>
+
+# include "gsi.h"
+# include "gsi_reg.h"
+# include "gsi_emulation_stubs.h"
+
+# define gsi_emu_readl(c)     ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
+
+# define CNTRLR_BASE 0
+
+/*
+ * The following file contains definitions and declarations that are
+ * germane only to the IPA emulation system, which is run from an X86
+ * environment.  Declarations for non-X86 (ie. ARM) are merely stubs
+ * to facilitate compile and link.
+ *
+ * Interrupt controller registers.
+ * Descriptions taken from the EMULATION interrupt controller SWI.
+ * - There is only one Master Enable register
+ * - Each group of 32 interrupt lines (range) is controlled by 8 registers,
+ *   which are consecutive in memory:
+ *      GE_INT_ENABLE_n
+ *      GE_INT_ENABLE_CLEAR_n
+ *      GE_INT_ENABLE_SET_n
+ *      GE_INT_TYPE_n
+ *      GE_IRQ_STATUS_n
+ *      GE_RAW_STATUS_n
+ *      GE_INT_CLEAR_n
+ *      GE_SOFT_INT_n
+ * - After the above 8 registers, there are the registers of the next
+ *   group (range) of 32 interrupt lines, and so on.
+ */
+
+/** @brief The interrupt controller version and interrupt count register.
+ *         Specifies interrupt controller version (upper 16 bits) and the
+ *         number of interrupt lines supported by HW (lower 16 bits).
+ */
+# define GE_INT_CTL_VER_CNT              \
+	(CNTRLR_BASE + 0x0000)
+
+/** @brief Enable or disable physical IRQ output signal to the system,
+ *         not affecting any status registers.
+ *
+ *         0x0 : DISABLE IRQ output disabled
+ *         0x1 : ENABLE  IRQ output enabled
+ */
+# define GE_INT_OUT_ENABLE               \
+	(CNTRLR_BASE + 0x0004)
+
+/** @brief The IRQ master enable register.
+ *         Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable.
+ */
+# define GE_INT_MASTER_ENABLE            \
+	(CNTRLR_BASE + 0x0008)
+
+# define GE_INT_MASTER_STATUS            \
+	(CNTRLR_BASE + 0x000C)
+
+/** @brief Each bit disables (bit=0, default) or enables (bit=1) the
+ *         corresponding interrupt source
+ */
+# define GE_INT_ENABLE_n(n)              \
+	(CNTRLR_BASE + 0x0010 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_CLEAR_n(n)        \
+	(CNTRLR_BASE + 0x0014 + 0x20 * (n))
+
+/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_SET_n(n)          \
+	(CNTRLR_BASE + 0x0018 + 0x20 * (n))
+
+/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input
+ *         detection logic for each corresponding interrupt source
+ */
+# define GE_INT_TYPE_n(n)                \
+	(CNTRLR_BASE + 0x001C + 0x20 * (n))
+
+/** @brief Shows the interrupt sources captured in RAW_STATUS that have been
+ *         steered to irq_n by INT_SELECT. Interrupts must also be enabled by
+ *         INT_ENABLE and MASTER_ENABLE. Read only register.
+ *         Bit values: 1=active, 0=inactive
+ */
+# define GE_IRQ_STATUS_n(n)                      \
+	(CNTRLR_BASE + 0x0020 + 0x20 * (n))
+
+/** @brief Shows the interrupt sources that have been latched by the input
+ *         logic of the Interrupt Controller. Read only register.
+ *         Bit values: 1=active, 0=inactive
+ */
+# define GE_RAW_STATUS_n(n)                      \
+	(CNTRLR_BASE + 0x0024 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_CLEAR_n(n)               \
+	(CNTRLR_BASE + 0x0028 + 0x20 * (n))
+
+/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS.
+ *         Does nothing for bit=0.
+ *  @note  Only functional for edge detected interrupts
+ */
+# define GE_SOFT_INT_n(n)                        \
+	(CNTRLR_BASE + 0x002C + 0x20 * (n))
+
+/** @brief Maximal number of ranges in SW. Each range supports 32 interrupt
+ *         lines. If HW is extended considerably, increase this value
+ */
+# define DEO_IC_MAX_RANGE_CNT            8
+
+/** @brief Size of the registers of one range in memory, in bytes */
+# define DEO_IC_RANGE_MEM_SIZE           32  /* SWI: 8 registers, no gaps */
+
+/** @brief Minimal Interrupt controller HW version */
+# define DEO_IC_INT_CTL_VER_MIN          0x0102
+
+
+#if IPA_EMULATION_COMPILE == 1 /* declarations to follow */
+
+/*
+ * *****************************************************************************
+ * The following used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt);
+
+# else /* #if IPA_EMULATION_COMPILE != 1, then definitions to follow */
+
+static inline int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size)
+{
+	return 0;
+}
+
+static inline irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	return IRQ_NONE;
+}
+
+static inline irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	return IRQ_HANDLED;
+}
+
+# endif /* #if IPA_EMULATION_COMPILE == 1 */
+
+#endif /* #if !defined(_GSI_EMULATION_H_) */
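The per-range macros above stride by 0x20 bytes per range, i.e. DEO_IC_RANGE_MEM_SIZE (8 registers of 4 bytes each) for every block of 32 interrupt lines. A worked example of the resulting offsets, assuming CNTRLR_BASE == 0 as defined above:

	/*
	 * range 0 (interrupt lines 0..31):
	 *   GE_INT_ENABLE_n(0) = 0x0010 ... GE_SOFT_INT_n(0) = 0x002C
	 * range 1 (interrupt lines 32..63):
	 *   GE_INT_ENABLE_n(1) = 0x0010 + 0x20 = 0x0030
	 *
	 * setup_emulator_cntrlr() then checks that the whole span,
	 *   GE_SOFT_INT_n(rangeCnt - 1) - GE_INT_CTL_VER_CNT,
	 * fits inside the dtsi-provisioned register window.
	 */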
diff --git a/drivers/platform/msm/gsi/gsi_emulation_stubs.h b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
new file mode 100644
index 0000000..dd9d0df
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_GSI_EMULATION_STUBS_H_)
+# define _GSI_EMULATION_STUBS_H_
+
+# include <asm/barrier.h>
+# define __iormb()       rmb() /* used in gsi.h */
+# define __iowmb()       wmb() /* used in gsi.h */
+
+#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */
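On ARM builds __iormb()/__iowmb() come from the architecture's I/O headers; the x86 emulation build has no such macros, so the stubs above fall back to the generic rmb()/wmb() barriers. With these stubs in place, the accessors in gsi_emulation.h expand roughly as shown below (illustrative expansion only):

	/* gsi_emu_readl(c): read, then order against later accesses */
	u32 val = readl_relaxed(c);
	rmb();

	/* gsi_emu_writel(v, c): order against earlier stores, then write */
	wmb();
	writel_relaxed(v, c);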
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index 15ed471..857e17b 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,3 +1,9 @@
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
 obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common
 obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
 obj-$(CONFIG_IPA_UT) += test/
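The ccflags-y lines above turn the build architecture into a preprocessor constant: IPA_EMULATION_COMPILE is 1 for x86 (emulation) builds and 0 for target builds. The sources in this patch key off it both at preprocessing time and at run time, in the pattern below (a sketch mirroring the uses in gsi.h and ipa_api.c; nothing here goes beyond those two uses):

	#if IPA_EMULATION_COMPILE == 1
	# include "gsi_emulation_stubs.h"	/* x86-only emulation glue */
	#endif

	/* true only in an x86 emulation build */
	static bool running_emulation = IPA_EMULATION_COMPILE;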
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index fdcf44d..3a75bdd 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -19,8 +19,16 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/ipa_uc_offload.h>
+#include <linux/pci.h>
 #include "ipa_api.h"
 
+/*
+ * The following pulls in code (ie. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_v3/ipa_emulation_stubs.h"
+#endif
+
 #define DRV_NAME "ipa"
 
 #define IPA_API_DISPATCH_RETURN(api, p...) \
@@ -96,6 +104,8 @@
 		} \
 	} while (0)
 
+static bool running_emulation = IPA_EMULATION_COMPILE;
+
 static enum ipa_hw_type ipa_api_hw_type;
 static struct ipa_api_controller *ipa_api_ctrl;
 
@@ -2916,6 +2926,57 @@
 	{}
 };
 
+/*********************************************************/
+/*                PCIe Version                           */
+/*********************************************************/
+
+static const struct of_device_id ipa_pci_drv_match[] = {
+	{ .compatible = "qcom,ipa", },
+	{}
+};
+
+/*
+ * Forward declarations of static functions required for PCI
+ * registration
+ *
+ * VENDOR and DEVICE should be defined in pci_ids.h
+ */
+static int ipa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ipa_pci_remove(struct pci_dev *pdev);
+static void ipa_pci_shutdown(struct pci_dev *pdev);
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *dev,
+	pci_channel_state_t state);
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *dev);
+static void ipa_pci_io_resume(struct pci_dev *dev);
+
+#define LOCAL_VENDOR 0x17CB
+#define LOCAL_DEVICE 0x00ff
+
+static const char ipa_pci_driver_name[] = "qcipav3";
+
+static const struct pci_device_id ipa_pci_tbl[] = {
+	{ PCI_DEVICE(LOCAL_VENDOR, LOCAL_DEVICE) },
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ipa_pci_tbl);
+
+/* PCI Error Recovery */
+static const struct pci_error_handlers ipa_pci_err_handler = {
+	.error_detected = ipa_pci_io_error_detected,
+	.slot_reset = ipa_pci_io_slot_reset,
+	.resume = ipa_pci_io_resume,
+};
+
+static struct pci_driver ipa_pci_driver = {
+	.name     = ipa_pci_driver_name,
+	.id_table = ipa_pci_tbl,
+	.probe    = ipa_pci_probe,
+	.remove   = ipa_pci_remove,
+	.shutdown = ipa_pci_shutdown,
+	.err_handler = &ipa_pci_err_handler
+};
+
 static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
 {
 	int result;
@@ -3364,10 +3425,86 @@
 	},
 };
 
+/*********************************************************/
+/*                PCIe Version                           */
+/*********************************************************/
+
+static int ipa_pci_probe(
+	struct pci_dev             *pci_dev,
+	const struct pci_device_id *ent)
+{
+	int result;
+
+	if (!pci_dev || !ent) {
+		pr_err(
+		    "Bad arg: pci_dev (%pK) and/or ent (%pK)\n",
+		    pci_dev, ent);
+		return -EOPNOTSUPP;
+	}
+
+	if (!ipa_api_ctrl) {
+		ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+		if (ipa_api_ctrl == NULL)
+			return -ENOMEM;
+		/* Get IPA HW Version */
+		result = of_property_read_u32(NULL,
+			"qcom,ipa-hw-ver", &ipa_api_hw_type);
+		if (result || ipa_api_hw_type == 0) {
+			pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+			kfree(ipa_api_ctrl);
+			ipa_api_ctrl = NULL;
+			return -ENODEV;
+		}
+		pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
+	}
+
+	/*
+	 * Call a reduced version of platform_probe appropriate for PCIe
+	 */
+	result = ipa3_pci_drv_probe(pci_dev, ipa_api_ctrl, ipa_pci_drv_match);
+
+	if (result && result != -EPROBE_DEFER)
+		pr_err("ipa: ipa3_pci_drv_probe failed\n");
+
+	if (running_emulation)
+		ipa_ut_module_init();
+
+	return result;
+}
+
+static void ipa_pci_remove(struct pci_dev *pci_dev)
+{
+	if (running_emulation)
+		ipa_ut_module_exit();
+}
+
+static void ipa_pci_shutdown(struct pci_dev *pci_dev)
+{
+}
+
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *pci_dev,
+	pci_channel_state_t state)
+{
+	return 0;
+}
+
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *pci_dev)
+{
+	return 0;
+}
+
+static void ipa_pci_io_resume(struct pci_dev *pci_dev)
+{
+}
+
 static int __init ipa_module_init(void)
 {
 	pr_debug("IPA module init\n");
 
+	if (running_emulation) {
+		/* Register as a PCI device driver */
+		return pci_register_driver(&ipa_pci_driver);
+	}
 	/* Register as a platform device driver */
 	return platform_driver_register(&ipa_plat_drv);
 }
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index b7edb6f..cbcb0ee 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -452,6 +452,10 @@
 int ipa3_plat_drv_probe(struct platform_device *pdev_p,
 	struct ipa_api_controller *api_ctrl,
 	const struct of_device_id *pdrv_match);
+int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match);
 #else
 static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
 	struct ipa_api_controller *api_ctrl,
@@ -459,6 +463,13 @@
 {
 	return -ENODEV;
 }
+static inline int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	return -ENODEV;
+}
 #endif /* (CONFIG_IPA3) */
 
 #endif /* _IPA_API_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index 738d88f..a213130 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o
 obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o
 obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
 obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
new file mode 100644
index 0000000..5812d8d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
@@ -0,0 +1,1084 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/ipa.h>
+#include <linux/cdev.h>
+#include <linux/ipa_odu_bridge.h>
+#include "../ipa_common_i.h"
+#ifdef CONFIG_IPA3
+#include "../ipa_v3/ipa_pm.h"
+#endif
+
+#define IPA_GSB_DRV_NAME "ipa_gsb"
+
+#define MAX_SUPPORTED_IFACE 5
+
+#define IPA_GSB_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_MAX_MSG_LEN 512
+static char dbg_buff[IPA_GSB_MAX_MSG_LEN];
+
+#define IPA_GSB_SKB_HEADROOM 256
+#define IPA_GSB_SKB_DUMMY_HEADER 42
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_AGGR_TIME_LIMIT 1
+
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+/**
+ * struct stats - driver statistics,
+ * @num_ul_packets: number of uplink packets
+ * @num_dl_packets: number of downlink packets
+ * @num_insufficient_headroom_packets: number of
+ *	packets with insufficient headroom
+ */
+struct stats {
+	u64 num_ul_packets;
+	u64 num_dl_packets;
+	u64 num_insufficient_headroom_packets;
+};
+
+/**
+ * struct ipa_gsb_mux_hdr - ipa gsb mux header,
+ * @iface_hdl: interface handle
+ * @qmap_id: qmap id
+ * @pkt_size: packet size
+ */
+struct ipa_gsb_mux_hdr {
+	u8 iface_hdl;
+	u8 qmap_id;
+	u16 pkt_size;
+};
+
+/**
+ * struct ipa_gsb_iface_info - GSB interface information
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data, to be used in client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD callback
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @iface_stats: statistics, how many packets were transmitted
+ * using the SW bridge.
+ * @partial_hdr_hdl: handle for partial header
+ * @wakeup_request: client callback to wakeup
+ * @is_connected: is the interface connected?
+ * @is_resumed: is the interface resumed?
+ * @iface_hdl: interface handle
+ */
+struct ipa_gsb_iface_info {
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 device_ethaddr[ETH_ALEN];
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	struct stats iface_stats;
+	uint32_t partial_hdr_hdl[IPA_IP_MAX];
+	void (*wakeup_request)(void *);
+	bool is_connected;
+	bool is_resumed;
+	u8 iface_hdl;
+};
+
+/**
+ * struct ipa_gsb_context - GSB driver context information
+ * @logbuf: buffer of ipc logging
+ * @logbuf_low: buffer of ipc logging (low priority)
+ * @lock: mutex lock
+ * @prod_hdl: handle for prod pipe
+ * @cons_hdl: handle for cons pipe
+ * @ipa_sys_desc_size: sys pipe desc size
+ * @num_iface: number of interfaces
+ * @iface_hdl: interface handles
+ * @num_connected_iface: number of connected interfaces
+ * @num_resumed_iface: number of resumed interfaces
+ * @iface: interface information
+ * @pm_hdl: IPA PM handle
+ */
+struct ipa_gsb_context {
+	void *logbuf;
+	void *logbuf_low;
+	struct mutex lock;
+	u32 prod_hdl;
+	u32 cons_hdl;
+	u32 ipa_sys_desc_size;
+	int num_iface;
+	bool iface_hdl[MAX_SUPPORTED_IFACE];
+	int num_connected_iface;
+	int num_resumed_iface;
+	struct ipa_gsb_iface_info *iface[MAX_SUPPORTED_IFACE];
+	u32 pm_hdl;
+};
+
+static struct ipa_gsb_context *ipa_gsb_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t ipa_gsb_debugfs_stats(struct file *file,
+				  char __user *ubuf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	int i, nbytes = 0;
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL) {
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"netdev: %s\n",
+				ipa_gsb_ctx->iface[i]->netdev_name);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"UL packets: %lld\n",
+				ipa_gsb_ctx->iface[i]->
+				iface_stats.num_ul_packets);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"DL packets: %lld\n",
+				ipa_gsb_ctx->iface[i]->
+				iface_stats.num_dl_packets);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"packets with insufficient headroom: %lld\n",
+				ipa_gsb_ctx->iface[i]->
+				iface_stats.num_insufficient_headroom_packets);
+		}
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static const struct file_operations ipa_gsb_stats_ops = {
+	.read = ipa_gsb_debugfs_stats,
+};
+
+static void ipa_gsb_debugfs_init(void)
+{
+	const mode_t read_only_mode = 00444;
+
+	dent = debugfs_create_dir("ipa_gsb", NULL);
+	if (IS_ERR(dent)) {
+		IPA_GSB_ERR("fail to create folder ipa_gsb\n");
+		return;
+	}
+
+	dfile_stats =
+		debugfs_create_file("stats", read_only_mode, dent,
+					NULL, &ipa_gsb_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		IPA_GSB_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_gsb_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+#else
+static void ipa_gsb_debugfs_init(void)
+{
+}
+
+static void ipa_gsb_debugfs_destroy(void)
+{
+}
+#endif
+
+static int ipa_gsb_driver_init(struct odu_bridge_params *params)
+{
+	if (!ipa_is_ready()) {
+		IPA_GSB_ERR("IPA is not ready\n");
+		return -EFAULT;
+	}
+
+	ipa_gsb_ctx = kzalloc(sizeof(*ipa_gsb_ctx),
+		GFP_KERNEL);
+
+	if (!ipa_gsb_ctx)
+		return -ENOMEM;
+
+	mutex_init(&ipa_gsb_ctx->lock);
+	ipa_gsb_debugfs_init();
+
+	return 0;
+}
+
+static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info)
+{
+	int i;
+	struct ipa_ioc_add_hdr *hdr;
+
+	if (!iface_info) {
+		IPA_GSB_ERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+		2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr)
+		return -ENOMEM;
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", iface_info->netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", iface_info->netdev_name);
+	/*
+	 * partial header:
+	 * [hdl][QMAP ID][pkt size][Dummy Header][ETH header]
+	 */
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		/*
+		 * Optimization: add dummy header to reserve space
+		 * for rndis header, so we can do the skb_clone
+		 * instead of deep copy.
+		 */
+		hdr->hdr[i].hdr_len = ETH_HLEN +
+			sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
+		hdr->hdr[i].type = IPA_HDR_L2_ETHERNET_II;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
+		/* populate iface handle */
+		hdr->hdr[i].hdr[0] = iface_info->iface_hdl;
+		/* populate src ETH address */
+		memcpy(&hdr->hdr[i].hdr[10 + IPA_GSB_SKB_DUMMY_HEADER],
+			iface_info->device_ethaddr, 6);
+		/* populate Ethertype */
+		if (i == IPA_IP_v4)
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IP);
+		else
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6);
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_GSB_ERR("fail to add partial headers\n");
+		kfree(hdr);
+		return -EFAULT;
+	}
+
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++)
+		iface_info->partial_hdr_hdl[i] =
+			hdr->hdr[i].hdr_hdl;
+
+	IPA_GSB_DBG("added partial hdr hdl for ipv4: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v4]);
+	IPA_GSB_DBG("added partial hdr hdl for ipv6: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v6]);
+
+	kfree(hdr);
+	return 0;
+}
+
+static void ipa_gsb_delete_partial_hdr(struct ipa_gsb_iface_info *iface_info)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+
+	del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+		2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	del_hdr->hdl[IPA_IP_v4].hdl = iface_info->partial_hdr_hdl[IPA_IP_v4];
+	del_hdr->hdl[IPA_IP_v6].hdl = iface_info->partial_hdr_hdl[IPA_IP_v6];
+
+	if (ipa_del_hdr(del_hdr) != 0)
+		IPA_GSB_ERR("failed to delete partial hdr\n");
+
+	IPA_GSB_DBG("deleted partial hdr hdl for ipv4: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v4]);
+	IPA_GSB_DBG("deleted partial hdr hdl for ipv6: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v6]);
+
+	kfree(del_hdr);
+}
+
+static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info)
+{
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	tx_prop[0].dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	tx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	snprintf(tx_prop[0].hdr_name, sizeof(tx_prop[0].hdr_name),
+			 "%s_ipv4", iface_info->netdev_name);
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	tx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	snprintf(tx_prop[1].hdr_name, sizeof(tx_prop[1].hdr_name),
+			 "%s_ipv6", iface_info->netdev_name);
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_prop[0].attrib.meta_data = iface_info->iface_hdl;
+	rx_prop[0].attrib.meta_data_mask = 0xFF;
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_prop[1].attrib.meta_data = iface_info->iface_hdl;
+	rx_prop[1].attrib.meta_data_mask = 0xFF;
+
+	if (ipa_register_intf(iface_info->netdev_name, &tx, &rx)) {
+		IPA_GSB_ERR("fail to add interface prop\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_gsb_dereg_intf_props(struct ipa_gsb_iface_info *iface_info)
+{
+	if (ipa_deregister_intf(iface_info->netdev_name) != 0)
+		IPA_GSB_ERR("fail to dereg intf props\n");
+
+	IPA_GSB_DBG("deregistered iface props for %s\n",
+		iface_info->netdev_name);
+}
+
+static void ipa_gsb_pm_cb(void *user_data, enum ipa_pm_cb_event event)
+{
+	int i;
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_GSB_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_GSB_DBG_LOW("wake up clients\n");
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL)
+			ipa_gsb_ctx->iface[i]->wakeup_request(
+				ipa_gsb_ctx->iface[i]->priv);
+}
+
+static int ipa_gsb_register_pm(void)
+{
+	struct ipa_pm_register_params reg_params;
+	int ret;
+
+	memset(&reg_params, 0, sizeof(reg_params));
+	reg_params.name = "ipa_gsb";
+	reg_params.callback = ipa_gsb_pm_cb;
+	reg_params.user_data = NULL;
+	reg_params.group = IPA_PM_GROUP_DEFAULT;
+
+	ret = ipa_pm_register(&reg_params,
+		&ipa_gsb_ctx->pm_hdl);
+	if (ret) {
+		IPA_GSB_ERR("fail to register with PM %d\n", ret);
+		goto fail_pm_reg;
+	}
+	IPA_GSB_DBG("ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl);
+
+	ret = ipa_pm_associate_ipa_cons_to_client(ipa_gsb_ctx->pm_hdl,
+		IPA_CLIENT_ODU_EMB_CONS);
+	if (ret) {
+		IPA_GSB_ERR("fail to associate cons with PM %d\n", ret);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ipa_gsb_ctx->pm_hdl);
+	ipa_gsb_ctx->pm_hdl = ~0;
+fail_pm_reg:
+	return ret;
+}
+
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+	int i, ret;
+	struct ipa_gsb_iface_info *new_intf;
+
+	if (!params || !params->wakeup_request || !hdl ||
+		!params->info.netdev_name || !params->info.tx_dp_notify ||
+		!params->info.send_dl_skb) {
+		IPA_GSB_ERR("NULL parameters\n");
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG("netdev_name: %s\n", params->info.netdev_name);
+
+	if (ipa_gsb_ctx == NULL) {
+		ret = ipa_gsb_driver_init(&params->info);
+		if (ret) {
+			IPA_GSB_ERR("fail to init ipa gsb driver\n");
+			return -EFAULT;
+		}
+		ipa_gsb_ctx->ipa_sys_desc_size =
+			params->info.ipa_desc_size;
+		IPA_GSB_DBG("desc size: %d\n", ipa_gsb_ctx->ipa_sys_desc_size);
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (params->info.ipa_desc_size != ipa_gsb_ctx->ipa_sys_desc_size) {
+		IPA_GSB_ERR("mismatch: orig desc size %d, new desc size %d\n",
+			ipa_gsb_ctx->ipa_sys_desc_size,
+			params->info.ipa_desc_size);
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL &&
+			strnlen(ipa_gsb_ctx->iface[i]->netdev_name,
+					IPA_RESOURCE_NAME_MAX) ==
+			strnlen(params->info.netdev_name,
+					IPA_RESOURCE_NAME_MAX) &&
+			strcmp(ipa_gsb_ctx->iface[i]->netdev_name,
+				params->info.netdev_name) == 0) {
+			IPA_GSB_ERR("intf was added before.\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+
+	if (ipa_gsb_ctx->num_iface == MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("reached maximum supported interfaces\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface_hdl[i] == false) {
+			ipa_gsb_ctx->iface_hdl[i] = true;
+			*hdl = i;
+			IPA_GSB_DBG("iface hdl: %d\n", *hdl);
+			break;
+		}
+
+	IPA_GSB_DBG("intf was not added before, proceed.\n");
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		ret = -ENOMEM;
+		goto fail_alloc_mem;
+	}
+
+	strlcpy(new_intf->netdev_name, params->info.netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->wakeup_request = params->wakeup_request;
+	new_intf->priv = params->info.priv;
+	new_intf->tx_dp_notify = params->info.tx_dp_notify;
+	new_intf->send_dl_skb = params->info.send_dl_skb;
+	new_intf->iface_hdl = *hdl;
+	memcpy(new_intf->device_ethaddr, params->info.device_ethaddr,
+		sizeof(new_intf->device_ethaddr));
+
+	if (ipa_gsb_commit_partial_hdr(new_intf) != 0) {
+		IPA_GSB_ERR("fail to commit partial hdrs\n");
+		ret = -EFAULT;
+		goto fail_partial_hdr;
+	}
+
+	if (ipa_gsb_reg_intf_props(new_intf) != 0) {
+		IPA_GSB_ERR("fail to register interface props\n");
+		ret = -EFAULT;
+		goto fail_reg_intf_props;
+	}
+
+	if (ipa_gsb_ctx->num_iface == 0) {
+		ret = ipa_gsb_register_pm();
+		if (ret) {
+			IPA_GSB_ERR("fail to register with IPA PM %d\n", ret);
+			ret = -EFAULT;
+			goto fail_register_pm;
+		}
+	}
+
+	ipa_gsb_ctx->iface[*hdl] = new_intf;
+	ipa_gsb_ctx->num_iface++;
+	IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface);
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+
+fail_register_pm:
+	ipa_gsb_dereg_intf_props(new_intf);
+fail_reg_intf_props:
+	ipa_gsb_delete_partial_hdr(new_intf);
+fail_partial_hdr:
+	kfree(new_intf);
+fail_alloc_mem:
+	ipa_gsb_ctx->iface_hdl[*hdl] = false;
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_init);
+
+static void ipa_gsb_deregister_pm(void)
+{
+	IPA_GSB_DBG("deregister ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl);
+	ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+	ipa_pm_deregister(ipa_gsb_ctx->pm_hdl);
+	ipa_gsb_ctx->pm_hdl = ~0;
+}
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	if (ipa_gsb_ctx->iface[hdl] == NULL) {
+		IPA_GSB_ERR("fail to find interface\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("cannot cleanup when iface is connected\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	ipa_gsb_dereg_intf_props(ipa_gsb_ctx->iface[hdl]);
+	ipa_gsb_delete_partial_hdr(ipa_gsb_ctx->iface[hdl]);
+	kfree(ipa_gsb_ctx->iface[hdl]);
+	ipa_gsb_ctx->iface[hdl] = NULL;
+	ipa_gsb_ctx->iface_hdl[hdl] = false;
+	ipa_gsb_ctx->num_iface--;
+	IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface);
+
+	if (ipa_gsb_ctx->num_iface == 0) {
+		ipa_gsb_deregister_pm();
+		ipa_gsb_debugfs_destroy();
+		ipc_log_context_destroy(ipa_gsb_ctx->logbuf);
+		ipc_log_context_destroy(ipa_gsb_ctx->logbuf_low);
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		kfree(ipa_gsb_ctx);
+		ipa_gsb_ctx = NULL;
+		return 0;
+	}
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_cleanup);
+
+static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sk_buff *skb2;
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	u16 pkt_size, pad_byte;
+	u8 hdl;
+
+	if (evt != IPA_RECEIVE) {
+		IPA_GSB_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+
+	skb = (struct sk_buff *)data;
+
+	while (skb->len) {
+		mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
+		pkt_size = mux_hdr->pkt_size;
+		/* 4-byte padding */
+		pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN +
+			3 + IPA_GSB_SKB_DUMMY_HEADER) & ~3) -
+			(pkt_size + sizeof(*mux_hdr) +
+			ETH_HLEN + IPA_GSB_SKB_DUMMY_HEADER);
+		hdl = mux_hdr->iface_hdl;
+		IPA_GSB_DBG_LOW("pkt_size: %d, pad_byte: %d, hdl: %d\n",
+			pkt_size, pad_byte, hdl);
+
+		/* remove 4 byte mux header AND dummy header */
+		skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (!skb2) {
+			IPA_GSB_ERR("skb_clone failed\n");
+			WARN_ON(1);
+			break;
+		}
+		skb_trim(skb2, pkt_size + ETH_HLEN);
+		ipa_gsb_ctx->iface[hdl]->send_dl_skb(
+			ipa_gsb_ctx->iface[hdl]->priv, skb2);
+		ipa_gsb_ctx->iface[hdl]->iface_stats.num_dl_packets++;
+		skb_pull(skb, pkt_size + ETH_HLEN + pad_byte);
+	}
+	if (skb) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+	}
+}
+
+static void ipa_gsb_tx_dp_notify(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data)
+{
+	struct sk_buff *skb;
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	u8 hdl;
+
+	skb = (struct sk_buff *)data;
+
+	if (evt != IPA_WRITE_DONE && evt != IPA_RECEIVE) {
+		IPA_GSB_ERR("unexpected event: %d\n", evt);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* fetch iface handle from header */
+	mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
+	/* change to host order */
+	*(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr);
+	hdl = mux_hdr->iface_hdl;
+	IPA_GSB_DBG_LOW("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl);
+
+	/* remove 4 byte mux header */
+	skb_pull(skb, sizeof(struct ipa_gsb_mux_hdr));
+	ipa_gsb_ctx->iface[hdl]->tx_dp_notify(
+		ipa_gsb_ctx->iface[hdl]->priv, evt,
+		(unsigned long)skb);
+}
+
+static int ipa_gsb_connect_sys_pipe(void)
+{
+	struct ipa_sys_connect_params prod_params;
+	struct ipa_sys_connect_params cons_params;
+	int res;
+
+	memset(&prod_params, 0, sizeof(prod_params));
+	memset(&cons_params, 0, sizeof(cons_params));
+
+	/* configure RX EP */
+	prod_params.client = IPA_CLIENT_ODU_PROD;
+	prod_params.ipa_ep_cfg.hdr.hdr_len =
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr);
+	prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+	prod_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size;
+	prod_params.priv = NULL;
+	prod_params.notify = ipa_gsb_tx_dp_notify;
+	res = ipa_setup_sys_pipe(&prod_params,
+		&ipa_gsb_ctx->prod_hdl);
+	if (res) {
+		IPA_GSB_ERR("fail to setup prod sys pipe %d\n", res);
+		goto fail_prod;
+	}
+
+	/* configure TX EP */
+	cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	cons_params.ipa_ep_cfg.hdr.hdr_len =
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr) +
+		IPA_GSB_SKB_DUMMY_HEADER;
+	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+	cons_params.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+	cons_params.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
+	cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	/* setup aggregation */
+	cons_params.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	cons_params.ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+	cons_params.ipa_ep_cfg.aggr.aggr_time_limit =
+		IPA_GSB_AGGR_TIME_LIMIT;
+	cons_params.ipa_ep_cfg.aggr.aggr_byte_limit =
+		IPA_GSB_AGGR_BYTE_LIMIT;
+	cons_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size;
+	cons_params.priv = NULL;
+	cons_params.notify = ipa_gsb_cons_cb;
+	res = ipa_setup_sys_pipe(&cons_params,
+		&ipa_gsb_ctx->cons_hdl);
+	if (res) {
+		IPA_GSB_ERR("fail to setup cons sys pipe %d\n", res);
+		goto fail_cons;
+	}
+
+	IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n",
+		ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl);
+
+	return 0;
+
+fail_cons:
+	ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl);
+	ipa_gsb_ctx->prod_hdl = 0;
+fail_prod:
+	return res;
+}
+
+int ipa_bridge_connect(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_DBG("iface was already connected\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return 0;
+	}
+
+	if (ipa_gsb_ctx->num_connected_iface == 0) {
+		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("failed to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+		ret = ipa_gsb_connect_sys_pipe();
+		if (ret) {
+			IPA_GSB_ERR("fail to connect pipe\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+	}
+
+	/* connect = connect + resume */
+	ipa_gsb_ctx->iface[hdl]->is_connected = true;
+	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
+
+	ipa_gsb_ctx->num_connected_iface++;
+	IPA_GSB_DBG("connected iface: %d\n",
+		ipa_gsb_ctx->num_connected_iface);
+	ipa_gsb_ctx->num_resumed_iface++;
+	IPA_GSB_DBG("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_connect);
+
+static int ipa_gsb_disconnect_sys_pipe(void)
+{
+	int ret;
+
+	IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n",
+		ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl);
+
+	ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl);
+	if (ret) {
+		IPA_GSB_ERR("failed to tear down prod pipe\n");
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->prod_hdl = 0;
+
+	ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->cons_hdl);
+	if (ret) {
+		IPA_GSB_ERR("failed to tear down cons pipe\n");
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->cons_hdl = 0;
+
+	return 0;
+}
+
+int ipa_bridge_disconnect(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_DBG("iface was not connected\n");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return 0;
+	}
+
+	if (ipa_gsb_ctx->num_connected_iface == 1) {
+		ret = ipa_gsb_disconnect_sys_pipe();
+		if (ret) {
+			IPA_GSB_ERR("fail to discon pipes\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+
+		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("failed to deactivate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+	}
+
+	/* disconnect = suspend + disconnect */
+	ipa_gsb_ctx->iface[hdl]->is_connected = false;
+	ipa_gsb_ctx->num_connected_iface--;
+	IPA_GSB_DBG("connected iface: %d\n",
+		ipa_gsb_ctx->num_connected_iface);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		ipa_gsb_ctx->iface[hdl]->is_resumed = false;
+		ipa_gsb_ctx->num_resumed_iface--;
+		IPA_GSB_DBG("num resumed iface: %d\n",
+			ipa_gsb_ctx->num_resumed_iface);
+	}
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_disconnect);
+
+int ipa_bridge_resume(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("iface is not connected\n");
+		return -EFAULT;
+	}
+
+	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		IPA_GSB_DBG_LOW("iface was already resumed\n");
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->num_resumed_iface == 0) {
+		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+
+		ret = ipa_start_gsi_channel(
+			ipa_gsb_ctx->cons_hdl);
+		if (ret) {
+			IPA_GSB_ERR(
+				"fail to start con ep %d\n",
+				ret);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+	}
+
+	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
+	ipa_gsb_ctx->num_resumed_iface++;
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_resume);
+
+int ipa_bridge_suspend(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("iface is not connected\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		IPA_GSB_DBG_LOW("iface was already suspended\n");
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (ipa_gsb_ctx->num_resumed_iface == 1) {
+		ret = ipa_stop_gsi_channel(
+			ipa_gsb_ctx->cons_hdl);
+		if (ret) {
+			IPA_GSB_ERR(
+				"fail to stop cons ep %d\n",
+				ret);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+
+		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to deactivate ipa pm\n");
+			ipa_start_gsi_channel(ipa_gsb_ctx->cons_hdl);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return ret;
+		}
+	}
+
+	ipa_gsb_ctx->iface[hdl]->is_resumed = false;
+	ipa_gsb_ctx->num_resumed_iface--;
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_suspend);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	int ret;
+
+	IPA_GSB_DBG("client hdl: %d, BW: %d\n", hdl, bandwidth);
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	ret = ipa_pm_set_perf_profile(ipa_gsb_ctx->pm_hdl,
+		bandwidth);
+	if (ret)
+		IPA_GSB_ERR("fail to set perf profile\n");
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	struct sk_buff *skb2;
+	int ret;
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	/* make sure skb has enough headroom */
+	if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) {
+		IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n");
+		skb2 = skb_copy_expand(skb, sizeof(struct ipa_gsb_mux_hdr),
+			0, GFP_ATOMIC);
+		if (!skb2) {
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		ipa_gsb_ctx->iface[hdl]->iface_stats.
+			num_insufficient_headroom_packets++;
+	}
+
+	/* add 4 byte header for mux */
+	mux_hdr = (struct ipa_gsb_mux_hdr *)skb_push(skb,
+		sizeof(struct ipa_gsb_mux_hdr));
+	mux_hdr->iface_hdl = (u8)hdl;
+	/* change to network order */
+	*(u32 *)mux_hdr = htonl(*(u32 *)mux_hdr);
+
+	ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+	if (ret) {
+		IPA_GSB_ERR("tx dp failed %d\n", ret);
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->iface[hdl]->iface_stats.num_ul_packets++;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ipa gsb driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 906e911..b4af0a09 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,39 @@
 #endif
 
 #define IPA_MHI_DRV_NAME "ipa_mhi_client"
+
 #define IPA_MHI_DBG(fmt, args...) \
-	pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
-		 __func__, __LINE__, ## args)
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
 #define IPA_MHI_ERR(fmt, args...) \
-	pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+	do { \
+		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
 #define IPA_MHI_FUNC_ENTRY() \
 	IPA_MHI_DBG("ENTRY\n")
+
 #define IPA_MHI_FUNC_EXIT() \
 	IPA_MHI_DBG("EXIT\n")
 
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index d9e3ab9..704308f 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1336,7 +1336,12 @@
 	chan_params.chan_params.ring_base_addr =
 		params->xfer_ring_base_addr_iova;
 	chan_params.chan_params.ring_base_vaddr = NULL;
-	chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		chan_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+	chan_params.chan_params.prefetch_mode =
+		ipa_get_ep_prefetch_mode(chan_params.client);
 	chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
 	if (params->dir == GSI_CHAN_DIR_FROM_GSI)
 		chan_params.chan_params.low_weight =
@@ -2153,6 +2158,18 @@
 static void ipa_usb_debugfs_remove(void){}
 #endif /* CONFIG_DEBUG_FS */
 
+static int ipa_usb_set_lock_unlock(bool is_lock)
+{
+	IPA_USB_DBG("entry\n");
+	if (is_lock)
+		mutex_lock(&ipa3_usb_ctx->general_mutex);
+	else
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG("exit\n");
+
+	return 0;
+}
+
 
 
 int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
@@ -2216,6 +2233,16 @@
 		goto connect_fail;
 	}
 
+	/*
+	 * Register for xdci lock/unlock callback with ipa core driver.
+	 * As per use case, only register for IPA_CONS end point for now.
+	 * If needed we can include the same for IPA_PROD ep.
+	 * For IPA_USB_DIAG/DPL config there will not be any UL ep.
+	 */
+	if (connect_params->teth_prot != IPA_USB_DIAG)
+		ipa3_register_lock_unlock_callback(&ipa_usb_set_lock_unlock,
+			ul_out_params->clnt_hdl);
+
 	IPA_USB_DBG_LOW("exit\n");
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return 0;
@@ -2293,6 +2320,15 @@
 		}
 	}
 
+	/*
+	 * Deregister for xdci lock/unlock callback from ipa core driver.
+	 * As per use case, only deregister for IPA_CONS end point for now.
+	 * If needed we can include the same for IPA_PROD ep.
+	 * For IPA_USB_DIAG/DPL config there will not be any UL config.
+	 */
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+		ipa3_deregister_lock_unlock_callback(ul_clnt_hdl);
+
 	/* Change state to STOPPED */
 	if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
 		IPA_USB_ERR("failed to change state to stopped\n");
@@ -2916,6 +2952,7 @@
 	int i;
 	unsigned long flags;
 	int res;
+	struct ipa3_usb_pm_context *pm_ctx;
 
 	pr_debug("entry\n");
 	ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
@@ -2935,19 +2972,13 @@
 	ipa3_usb_ctx->dl_data_pending = false;
 	mutex_init(&ipa3_usb_ctx->general_mutex);
 
-	if (ipa_pm_is_used()) {
-		struct ipa3_usb_pm_context *pm_ctx;
-
-		pm_ctx =
-			&ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx;
-		pm_ctx->hdl = ~0;
-		pm_ctx->remote_wakeup_work =
-			&ipa3_usb_notify_remote_wakeup_work;
-		pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx;
-		pm_ctx->hdl = ~0;
-		pm_ctx->remote_wakeup_work =
-			&ipa3_usb_dpl_notify_remote_wakeup_work;
-	}
+	/* init PM related members */
+	pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx;
+	pm_ctx->hdl = ~0;
+	pm_ctx->remote_wakeup_work = &ipa3_usb_notify_remote_wakeup_work;
+	pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx;
+	pm_ctx->hdl = ~0;
+	pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work;
 
 	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
 		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
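The ipa_usb_set_lock_unlock() helper added above is handed to the IPA core through ipa3_register_lock_unlock_callback() so that the core can take and release the client's general_mutex around work it performs on the shared endpoint; the core-side half of this appears later in this patch in ipa_client.c. A minimal user-space sketch of that hand-over, assuming only its general shape (the register/deregister-by-assignment, the pthread mutex, and the demo names are illustrative, not the driver API):

#include <pthread.h>
#include <stdio.h>

typedef int (*lock_unlock_cb)(int is_lock);

/* Callback registered by the "client"; NULL means nothing registered. */
static lock_unlock_cb client_cb;
static pthread_mutex_t client_mutex = PTHREAD_MUTEX_INITIALIZER;

static int client_lock_unlock(int is_lock)
{
	return is_lock ? pthread_mutex_lock(&client_mutex)
		       : pthread_mutex_unlock(&client_mutex);
}

/* "Core" side: serialize its own work against the client's mutex. */
static void core_critical_op(void)
{
	if (client_cb)
		client_cb(1);
	printf("core: working under the client's lock\n");
	if (client_cb)
		client_cb(0);
}

int main(void)
{
	client_cb = client_lock_unlock;	/* register */
	core_critical_op();
	client_cb = NULL;		/* deregister */
	return 0;
}

Build with -lpthread.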
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index df546cd..3228410 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1262,385 +1262,5 @@
 }
 EXPORT_SYMBOL(odu_bridge_cleanup);
 
-/* IPA Bridge implementation */
-#ifdef CONFIG_IPA3
-
-static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event,
-	unsigned long data)
-{
-	if (event == IPA_RM_RESOURCE_GRANTED)
-		complete(&odu_bridge_ctx->rm_comp);
-}
-
-static int ipa_br_request_prod(void)
-{
-	int res;
-
-	ODU_BRIDGE_FUNC_ENTRY();
-
-	reinit_completion(&odu_bridge_ctx->rm_comp);
-	ODU_BRIDGE_DBG("requesting odu prod\n");
-	res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-	if (res) {
-		if (res != -EINPROGRESS) {
-			ODU_BRIDGE_ERR("failed to request prod %d\n", res);
-			return res;
-		}
-		wait_for_completion(&odu_bridge_ctx->rm_comp);
-	}
-
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-
-}
-
-static int ipa_br_release_prod(void)
-{
-	int res;
-
-	ODU_BRIDGE_FUNC_ENTRY();
-
-	reinit_completion(&odu_bridge_ctx->rm_comp);
-	ODU_BRIDGE_DBG("requesting odu prod\n");
-	res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-	if (res) {
-		ODU_BRIDGE_ERR("failed to release prod %d\n", res);
-		return res;
-	}
-
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-
-}
-
-static int ipa_br_cons_request(void)
-{
-	ODU_BRIDGE_FUNC_ENTRY();
-	if (odu_bridge_ctx->is_suspended)
-		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-}
-
-static int ipa_br_cons_release(void)
-{
-	ODU_BRIDGE_FUNC_ENTRY();
-	ODU_BRIDGE_FUNC_EXIT();
-	return 0;
-}
-
-static void ipa_br_pm_cb(void *p, enum ipa_pm_cb_event event)
-{
-	ODU_BRIDGE_FUNC_ENTRY();
-	if (event != IPA_PM_REQUEST_WAKEUP) {
-		ODU_BRIDGE_ERR("Unexpected event %d\n", event);
-		WARN_ON(1);
-		return;
-	}
-
-	if (odu_bridge_ctx->is_suspended)
-		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
-	ODU_BRIDGE_FUNC_EXIT();
-}
-
-static int ipa_br_register_pm(void)
-{
-	struct ipa_pm_register_params reg_params;
-	int ret;
-
-	memset(&reg_params, 0, sizeof(reg_params));
-	reg_params.name = "ODU Bridge";
-	reg_params.callback = ipa_br_pm_cb;
-	reg_params.group = IPA_PM_GROUP_DEFAULT;
-
-	ret = ipa_pm_register(&reg_params,
-		&odu_bridge_ctx->pm_hdl);
-	if (ret) {
-		ODU_BRIDGE_ERR("fail to register with PM %d\n", ret);
-		goto fail_pm_reg;
-	}
-
-	ret = ipa_pm_associate_ipa_cons_to_client(odu_bridge_ctx->pm_hdl,
-		IPA_CLIENT_ODU_EMB_CONS);
-	if (ret) {
-		ODU_BRIDGE_ERR("fail to associate cons with PM %d\n", ret);
-		goto fail_pm_cons;
-	}
-
-	return 0;
-
-fail_pm_cons:
-	ipa_pm_deregister(odu_bridge_ctx->pm_hdl);
-	odu_bridge_ctx->pm_hdl = ~0;
-fail_pm_reg:
-	return ret;
-}
-
-static int ipa_br_create_rm_resources(void)
-{
-	int ret;
-	struct ipa_rm_create_params create_params;
-
-	/* create IPA RM resources for power management */
-	init_completion(&odu_bridge_ctx->rm_comp);
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
-	create_params.reg_params.user_data = odu_bridge_ctx;
-	create_params.reg_params.notify_cb = ipa_br_rm_notify;
-	create_params.floor_voltage = IPA_VOLTAGE_SVS;
-	ret = ipa_rm_create_resource(&create_params);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret);
-		goto fail_rm_prod;
-	}
-
-	ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret);
-		goto fail_add_dep;
-	}
-
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
-	create_params.request_resource = ipa_br_cons_request;
-	create_params.release_resource = ipa_br_cons_release;
-	create_params.floor_voltage = IPA_VOLTAGE_SVS;
-	ret = ipa_rm_create_resource(&create_params);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to create RM cons %d\n", ret);
-		goto fail_rm_cons;
-	}
-
-	return 0;
-
-fail_rm_cons:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-fail_add_dep:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-fail_rm_prod:
-	return ret;
-}
-
-/* IPA Bridge API is the new API which will replaces old odu_bridge API */
-int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
-{
-	int ret;
-
-	if (!params || !params->wakeup_request || !hdl) {
-		ODU_BRIDGE_ERR("NULL arg\n");
-		return -EINVAL;
-	}
-
-
-	ret = odu_bridge_init(&params->info);
-	if (ret)
-		return ret;
-
-	odu_bridge_ctx->wakeup_request = params->wakeup_request;
-
-	if (ipa_pm_is_used())
-		ret = ipa_br_register_pm();
-	else
-		ret = ipa_br_create_rm_resources();
-	if (ret) {
-		ODU_BRIDGE_ERR("fail to register woth RM/PM %d\n", ret);
-		goto fail_pm;
-	}
-
-	/* handle is ignored for now */
-	*hdl = 0;
-
-	return 0;
-
-fail_pm:
-	odu_bridge_cleanup();
-	return ret;
-}
-EXPORT_SYMBOL(ipa_bridge_init);
-
-int ipa_bridge_connect(u32 hdl)
-{
-	int ret;
-
-	if (!odu_bridge_ctx) {
-		ODU_BRIDGE_ERR("Not initialized\n");
-		return -EFAULT;
-	}
-
-	if (odu_bridge_ctx->is_connected) {
-		ODU_BRIDGE_ERR("already connected\n");
-		return -EFAULT;
-	}
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_request_prod();
-	if (ret)
-		return ret;
-
-	return odu_bridge_connect();
-}
-EXPORT_SYMBOL(ipa_bridge_connect);
-
-int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
-{
-	struct ipa_rm_perf_profile profile = {0};
-	int ret;
-
-	if (ipa_pm_is_used())
-		return ipa_pm_set_perf_profile(odu_bridge_ctx->pm_hdl,
-			bandwidth);
-
-	profile.max_supported_bandwidth_mbps = bandwidth;
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret);
-		return ret;
-	}
-
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
-
-int ipa_bridge_disconnect(u32 hdl)
-{
-	int ret;
-
-	ret = odu_bridge_disconnect();
-	if (ret)
-		return ret;
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_release_prod();
-	if (ret)
-		return ret;
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_disconnect);
-
-int ipa_bridge_suspend(u32 hdl)
-{
-	int ret;
-
-	if (!odu_bridge_ctx) {
-		ODU_BRIDGE_ERR("Not initialized\n");
-		return -EFAULT;
-	}
-
-	if (!odu_bridge_ctx->is_connected) {
-		ODU_BRIDGE_ERR("bridge is  disconnected\n");
-		return -EFAULT;
-	}
-
-	if (odu_bridge_ctx->is_suspended) {
-		ODU_BRIDGE_ERR("bridge is already suspended\n");
-		return -EFAULT;
-	}
-
-	/* stop cons channel to prevent downlink data during suspend */
-	ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret);
-		return ret;
-	}
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_release_prod();
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to release prod %d\n", ret);
-		ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
-		return ret;
-	}
-	odu_bridge_ctx->is_suspended = true;
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_suspend);
-
-int ipa_bridge_resume(u32 hdl)
-{
-	int ret;
-
-	if (!odu_bridge_ctx) {
-		ODU_BRIDGE_ERR("Not initialized\n");
-		return -EFAULT;
-	}
-
-	if (!odu_bridge_ctx->is_connected) {
-		ODU_BRIDGE_ERR("bridge is  disconnected\n");
-		return -EFAULT;
-	}
-
-	if (!odu_bridge_ctx->is_suspended) {
-		ODU_BRIDGE_ERR("bridge is not suspended\n");
-		return -EFAULT;
-	}
-
-	if (ipa_pm_is_used())
-		ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
-	else
-		ret = ipa_br_request_prod();
-	if (ret)
-		return ret;
-
-	ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
-	if (ret) {
-		ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret);
-		return ret;
-	}
-	odu_bridge_ctx->is_suspended = false;
-
-	return 0;
-}
-EXPORT_SYMBOL(ipa_bridge_resume);
-
-int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
-	struct ipa_tx_meta *metadata)
-{
-	return odu_bridge_tx_dp(skb, metadata);
-}
-EXPORT_SYMBOL(ipa_bridge_tx_dp);
-
-static void ipa_br_delete_rm_resources(void)
-{
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-}
-
-static void ipa_br_deregister_pm(void)
-{
-	ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
-	ipa_pm_deregister(odu_bridge_ctx->pm_hdl);
-	odu_bridge_ctx->pm_hdl = ~0;
-}
-
-int ipa_bridge_cleanup(u32 hdl)
-{
-	if (ipa_pm_is_used())
-		ipa_br_deregister_pm();
-	else
-		ipa_br_delete_rm_resources();
-	return odu_bridge_cleanup();
-}
-EXPORT_SYMBOL(ipa_bridge_cleanup);
-
-#endif /* CONFIG_IPA3 */
-
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index b8a517e..530aa54 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -15,6 +15,7 @@
 
 #ifndef _IPA_COMMON_I_H_
 #define _IPA_COMMON_I_H_
+#include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/ipa.h>
 #include <linux/ipa_uc_offload.h>
@@ -441,4 +442,7 @@
 		struct sg_table *in_sgt_ptr);
 int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
 
+int ipa_ut_module_init(void);
+void ipa_ut_module_exit(void);
+
 #endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 953469a..c2f7aae 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -89,6 +89,8 @@
 	__stringify(DEL_L2TP_VLAN_MAPPING),
 	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
 	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+	__stringify(ADD_BRIDGE_VLAN_MAPPING),
+	__stringify(DEL_BRIDGE_VLAN_MAPPING),
 };
 
 const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 9f71d7b..ba98228 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2115,8 +2115,10 @@
 			goto fail_dma_mapping;
 		}
 
+		spin_lock_bh(&sys->spinlock);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		rx_len_cached = ++sys->len;
+		spin_unlock_bh(&sys->spinlock);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl,
 			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2130,8 +2132,10 @@
 	return;
 
 fail_sps_transfer:
+	spin_lock_bh(&sys->spinlock);
 	list_del(&rx_pkt->link);
 	rx_len_cached = --sys->len;
+	spin_unlock_bh(&sys->spinlock);
 	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
 			sys->rx_buff_sz, DMA_FROM_DEVICE);
 fail_dma_mapping:
@@ -2171,8 +2175,10 @@
 			goto fail_dma_mapping;
 		}
 
+		spin_lock_bh(&sys->spinlock);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		rx_len_cached = ++sys->len;
+		spin_unlock_bh(&sys->spinlock);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl,
 			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2185,9 +2191,11 @@
 
 	return;
 fail_sps_transfer:
+	spin_lock_bh(&sys->spinlock);
 	rx_len_cached = --sys->len;
 	list_del(&rx_pkt->link);
 	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
 	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
 		sys->rx_buff_sz, DMA_FROM_DEVICE);
 fail_dma_mapping:
@@ -2219,7 +2227,9 @@
 		}
 
 		rx_pkt = sys->repl.cache[curr];
+		spin_lock_bh(&sys->spinlock);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		spin_unlock_bh(&sys->spinlock);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl,
 			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2278,6 +2288,7 @@
 	u32 head;
 	u32 tail;
 
+	spin_lock_bh(&sys->spinlock);
 	list_for_each_entry_safe(rx_pkt, r,
 				 &sys->head_desc_list, link) {
 		list_del(&rx_pkt->link);
@@ -2295,6 +2306,7 @@
 		sys->free_skb(rx_pkt->data.skb);
 		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
+	spin_unlock_bh(&sys->spinlock);
 
 	if (sys->repl.cache) {
 		head = atomic_read(&sys->repl.head_idx);
@@ -2976,8 +2988,10 @@
 	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
 	struct sk_buff *rx_skb;
 
+	spin_lock_bh(&sys->spinlock);
 	if (unlikely(list_empty(&sys->head_desc_list))) {
 		WARN_ON(1);
+		spin_unlock_bh(&sys->spinlock);
 		return;
 	}
 	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
@@ -2985,6 +2999,7 @@
 					   link);
 	list_del(&rx_pkt_expected->link);
 	sys->len--;
+	spin_unlock_bh(&sys->spinlock);
 	if (size)
 		rx_pkt_expected->len = size;
 	rx_skb = rx_pkt_expected->data.skb;
@@ -3005,8 +3020,10 @@
 	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
 	struct sk_buff *rx_skb;
 
+	spin_lock_bh(&sys->spinlock);
 	if (unlikely(list_empty(&sys->head_desc_list))) {
 		WARN_ON(1);
+		spin_unlock_bh(&sys->spinlock);
 		return;
 	}
 	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
@@ -3014,6 +3031,7 @@
 					   link);
 	list_del(&rx_pkt_expected->link);
 	sys->len--;
+	spin_unlock_bh(&sys->spinlock);
 
 	if (size)
 		rx_pkt_expected->len = size;
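Every ipa_dp.c hunk above enforces one rule: additions to and removals from sys->head_desc_list, together with the cached sys->len counter, now happen under sys->spinlock, using the _bh variant because the rx completion path runs in bottom-half context. A minimal kernel-style sketch of that pattern, assuming only the list and lock fields shown (struct demo_sys and the demo_* helpers are illustrative, and this builds only inside a kernel tree):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pkt {
	struct list_head link;
};

struct demo_sys {
	struct list_head head_desc_list;
	spinlock_t spinlock;
	u32 len;
};

/* Producer: queue a descriptor, mirroring the locked list_add_tail(). */
static void demo_queue(struct demo_sys *sys, struct demo_pkt *pkt)
{
	spin_lock_bh(&sys->spinlock);
	list_add_tail(&pkt->link, &sys->head_desc_list);
	sys->len++;
	spin_unlock_bh(&sys->spinlock);
}

/* Consumer: pop the oldest descriptor, mirroring the locked list_del(). */
static struct demo_pkt *demo_dequeue(struct demo_sys *sys)
{
	struct demo_pkt *pkt = NULL;

	spin_lock_bh(&sys->spinlock);
	if (!list_empty(&sys->head_desc_list)) {
		pkt = list_first_entry(&sys->head_desc_list,
				       struct demo_pkt, link);
		list_del(&pkt->link);
		sys->len--;
	}
	spin_unlock_bh(&sys->spinlock);

	return pkt;
}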
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 84dce6f..50fe2a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1495,8 +1495,16 @@
 			}
 		}
 	}
-	mutex_unlock(&ipa_ctx->lock);
 
+	/* commit the change to IPA-HW */
+	if (ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4) ||
+		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6)) {
+		IPAERR_RL("fail to commit flt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa_ctx->lock);
+		return -EPERM;
+	}
+	mutex_unlock(&ipa_ctx->lock);
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 6285130..61c3a71 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -1249,8 +1249,9 @@
 	struct ipa_hdr_offset_entry *off_next;
 	struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry;
 	struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next;
-	int i, end = 0;
-	bool user_rule = false;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+	struct ipa_hdr_proc_ctx_tbl *htbl_proc = &ipa_ctx->hdr_proc_ctx_tbl;
+	int i;
 
 	/*
 	 * issue a reset on the routing module since routing rules point to
@@ -1288,9 +1289,6 @@
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only || entry->ipacm_installed) {
 			if (entry->is_hdr_proc_ctx) {
 				dma_unmap_single(ipa_ctx->pdev,
@@ -1298,9 +1296,15 @@
 					entry->hdr_len,
 					DMA_TO_DEVICE);
 				entry->proc_ctx = NULL;
+			} else {
+				/* move the offset entry to free list */
+				entry->offset_entry->ipacm_installed = 0;
+				list_move(&entry->offset_entry->link,
+					&htbl->head_free_offset_list[
+						entry->offset_entry->bin]);
 			}
 			list_del(&entry->link);
-			ipa_ctx->hdr_tbl.hdr_cnt--;
+			htbl->hdr_cnt--;
 			entry->ref_cnt = 0;
 			entry->cookie = 0;
 
@@ -1309,53 +1313,37 @@
 			kmem_cache_free(ipa_ctx->hdr_cache, entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
-		list_for_each_entry_safe(off_entry, off_next,
-					 &ipa_ctx->hdr_tbl.head_offset_list[i],
-					 link) {
 
-			/*
-			 * do not remove the default exception header which is
-			 * at offset 0
-			 */
-			if (off_entry->offset == 0)
-				continue;
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+			list_for_each_entry_safe(off_entry, off_next,
+				&ipa_ctx->hdr_tbl.head_offset_list[i],
+				link) {
+				/*
+				 * do not remove the default exception
+				 * header which is at offset 0
+				 */
+				if (off_entry->offset == 0)
+					continue;
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa_ctx->hdr_offset_cache,
 					off_entry);
-			} else {
-				if (off_entry->offset +
-					ipa_hdr_bin_sz[off_entry->bin] > end) {
-					end = off_entry->offset +
-						ipa_hdr_bin_sz[off_entry->bin];
-					IPADBG("replace end = %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(off_entry, off_next,
+			list_for_each_entry_safe(off_entry, off_next,
 				&ipa_ctx->hdr_tbl.head_free_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa_ctx->hdr_offset_cache,
 					off_entry);
 			}
 		}
+		/* only the default exception header (8 bytes) remains */
+		ipa_ctx->hdr_tbl.end = 8;
+		ipa_ctx->hdr_tbl.hdr_cnt = 1;
 	}
 
-	IPADBG("hdr_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa_ctx->hdr_tbl.end = end;
-		IPADBG("hdr_tbl.end = %d\n", end);
-	}
 	IPADBG("reset hdr proc ctx\n");
-	user_rule = false;
-	end = 0;
 	list_for_each_entry_safe(
 		ctx_entry,
 		ctx_next,
@@ -1364,17 +1352,18 @@
 
 		if (ipa_id_find(ctx_entry->id) == NULL) {
 			mutex_unlock(&ipa_ctx->lock);
-			WARN_ON(1);
+			WARN_ON_RATELIMIT_IPA(1);
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only ||
 				ctx_entry->ipacm_installed) {
+			/* move the offset entry to appropriate free list */
+			list_move(&ctx_entry->offset_entry->link,
+				&htbl_proc->head_free_offset_list[
+					ctx_entry->offset_entry->bin]);
 			list_del(&ctx_entry->link);
-			ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt--;
+			htbl_proc->proc_ctx_cnt--;
 			ctx_entry->ref_cnt = 0;
 			ctx_entry->cookie = 0;
 
@@ -1384,48 +1373,39 @@
 				ctx_entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
 				&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
-			} else {
-				if (ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin]
-					> end) {
-					end = ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin];
-					IPADBG("replace hdr_proc as %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
-			    &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
-			    link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&htbl_proc->head_free_offset_list[i],
+				link) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
 			}
 		}
+		ipa_ctx->hdr_proc_ctx_tbl.end = 0;
+		ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
 	}
 
-	IPADBG("hdr_proc_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa_ctx->hdr_proc_ctx_tbl.end = end;
-		IPADBG("hdr_proc_tbl.end = %d\n", end);
+	/* commit the change to IPA-HW */
+	if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+		IPAERR_RL("fail to commit hdr\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa_ctx->lock);
+		return -EFAULT;
 	}
+
 	mutex_unlock(&ipa_ctx->lock);
-
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index c043bad..15be5d6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -327,6 +327,11 @@
 	size_t tmp;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	if (!ipa_ctx->nat_mem.is_dev_init) {
+		IPAERR_RL("Nat table not initialized\n");
+		return -EPERM;
+	}
+
 	IPADBG("\n");
 	if (init->table_entries == 0) {
 		IPADBG("Table entries is zero\n");
@@ -576,6 +581,11 @@
 	int ret = 0;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	if (!ipa_ctx->nat_mem.is_dev_init) {
+		IPAERR_RL("Nat table not initialized\n");
+		return -EPERM;
+	}
+
 	IPADBG("\n");
 	if (dma->entries <= 0) {
 		IPAERR_RL("Invalid number of commands %d\n",
@@ -762,6 +772,16 @@
 	int result;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	if (!ipa_ctx->nat_mem.is_dev_init) {
+		IPAERR_RL("Nat table not initialized\n");
+		return -EPERM;
+	}
+
+	if (!ipa_ctx->nat_mem.public_ip_addr) {
+		IPAERR_RL("Public IP addr not assigned and trying to delete\n");
+		return -EPERM;
+	}
+
 	IPADBG("\n");
 	if (ipa_ctx->nat_mem.is_tmp_mem) {
 		IPAERR("using temp memory during nat del\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 7710279..073409b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1445,6 +1445,15 @@
 			}
 		}
 	}
+
+	/* commit the change to IPA-HW */
+	if (ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v4) ||
+		ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v6)) {
+		IPAERR("fail to commit rt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa_ctx->lock);
+		return -EPERM;
+	}
 	mutex_unlock(&ipa_ctx->lock);
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index d76c208..681b009 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -72,6 +72,9 @@
 
 #define INVALID_EP_MAPPING_INDEX (-1)
 
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+		(ARRAY_SIZE(__eq_array) <= (__eq_index))
+
 struct ipa_ep_confing {
 	bool valid;
 	int pipe_num;
@@ -119,6 +122,7 @@
 	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = {true, 18},
 	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        = {true,  6},
 	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        = {true,  7},
+
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
 						 = {true, 12},
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
@@ -1409,6 +1413,11 @@
 		}
 
 		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+			if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32,
+							ihl_ofst_meq32)) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
 				IPAERR("ran out of ihl_meq32 eq\n");
 				return -EPERM;
@@ -1426,6 +1435,11 @@
 		}
 
 		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+			if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32,
+							ihl_ofst_meq32)) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
 				IPAERR("ran out of ihl_meq32 eq\n");
 				return -EPERM;
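The IPA_IS_RAN_OUT_OF_EQ() checks added above validate the running equation index against the size of the array before it is used; the pre-existing '== -1' test only detects a sentinel value in a still-valid slot, not an index that has already walked past the end. A small stand-alone illustration of the same ARRAY_SIZE-based guard (the array contents and the RAN_OUT_OF_EQ name are hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define RAN_OUT_OF_EQ(eq_array, eq_index) \
	(ARRAY_SIZE(eq_array) <= (size_t)(eq_index))

int main(void)
{
	int ihl_ofst_meq32[] = { 10, 11 };	/* two register slots */
	size_t idx = 2;				/* one past the end */

	if (RAN_OUT_OF_EQ(ihl_ofst_meq32, idx)) {
		fprintf(stderr, "ran out of ihl_meq32 eq\n");
		return 1;
	}
	printf("using eq register %d\n", ihl_ofst_meq32[idx]);
	return 0;
}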
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index ae4dccf..ed78342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -1,9 +1,19 @@
 obj-$(CONFIG_IPA3) += ipahal/
 
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
 obj-$(CONFIG_IPA3) += ipat.o
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
 	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
 	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
 	ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o
 
+ifdef CONFIG_X86
+ipat-y += ipa_dtsi_replacement.o
+endif
+
 obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index c523b3d..b46da99 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/hashtable.h>
 #include <linux/jhash.h>
+#include <linux/pci.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/smem.h>
 #include <soc/qcom/scm.h>
@@ -57,169 +58,14 @@
 #define CREATE_TRACE_POINTS
 #include "ipa_trace.h"
 
-#define IPA_GPIO_IN_QUERY_CLK_IDX 0
-#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
-#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
-
-#define IPA_SUMMING_THRESHOLD (0x10)
-#define IPA_PIPE_MEM_START_OFST (0x0)
-#define IPA_PIPE_MEM_SIZE (0x0)
-#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
-			       x == IPA_MODE_MOBILE_AP_WAN || \
-			       x == IPA_MODE_MOBILE_AP_WLAN)
-#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
-#define IPA_A5_MUX_HEADER_LENGTH (8)
-
-#define IPA_AGGR_MAX_STR_LENGTH (10)
-
-#define CLEANUP_TAG_PROCESS_TIMEOUT 500
-
-#define IPA_AGGR_STR_IN_BYTES(str) \
-	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
-
-#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
-
-#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
-
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
-
-#define IPA_MHI_GSI_EVENT_RING_ID_START 10
-#define IPA_MHI_GSI_EVENT_RING_ID_END 12
-
-#define IPA_SMEM_SIZE (8 * 1024)
-
-#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
-#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
-#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
-
-/* round addresses for closes page per SMMU requirements */
-#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
-	do { \
-		(iova_p) = rounddown((iova), PAGE_SIZE); \
-		(pa_p) = rounddown((pa), PAGE_SIZE); \
-		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
-	} while (0)
-
-
-/* The relative location in /lib/firmware where the FWs will reside */
-#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+/*
+ * For EMULATION builds, pull in stubs for code not available on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_emulation_stubs.h"
+#endif
 
 #ifdef CONFIG_COMPAT
-#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_ADD_HDR, \
-					compat_uptr_t)
-#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_DEL_HDR, \
-					compat_uptr_t)
-#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_ADD_RT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_DEL_RT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_ADD_FLT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_DEL_FLT_RULE, \
-					compat_uptr_t)
-#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GET_RT_TBL, \
-				compat_uptr_t)
-#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_COPY_HDR, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_QUERY_INTF, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
-					compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
-					compat_uptr_t)
-#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GET_HDR, \
-				compat_uptr_t)
-#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ALLOC_NAT_MEM, \
-				compat_uptr_t)
-#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ALLOC_NAT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_V4_INIT_NAT, \
-				compat_uptr_t)
-#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_INIT_IPV6CT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_TABLE_DMA_CMD, \
-				compat_uptr_t)
-#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_V4_DEL_NAT, \
-				compat_uptr_t)
-#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_DEL_NAT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_DEL_IPV6CT_TABLE, \
-				compat_uptr_t)
-#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_NAT_MODIFY_PDN, \
-				compat_uptr_t)
-#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GET_NAT_OFFSET, \
-				compat_uptr_t)
-#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_PULL_MSG, \
-				compat_uptr_t)
-#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_RM_ADD_DEPENDENCY, \
-				compat_uptr_t)
-#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_RM_DEL_DEPENDENCY, \
-				compat_uptr_t)
-#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_GENERATE_FLT_EQ, \
-				compat_uptr_t)
-#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
-				compat_uptr_t)
-#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_WRITE_QMAPID, \
-				compat_uptr_t)
-#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_MDFY_FLT_RULE, \
-				compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
-				compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
-				compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
-					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
-					compat_uptr_t)
-#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_ADD_HDR_PROC_CTX, \
-				compat_uptr_t)
-#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_DEL_HDR_PROC_CTX, \
-				compat_uptr_t)
-#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
-				IPA_IOCTL_MDFY_RT_RULE, \
-				compat_uptr_t)
-
 /**
  * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
  * properties
@@ -243,8 +89,7 @@
 	compat_size_t size;
 	compat_off_t offset;
 };
-
-#endif
+#endif /* #ifdef CONFIG_COMPAT */
 
 #define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
 #define TZ_MEM_PROTECT_REGION_ID 0x10
@@ -284,6 +129,7 @@
 static struct clk *ipa3_clk;
 
 struct ipa3_context *ipa3_ctx;
+
 static struct {
 	bool present[IPA_SMMU_CB_MAX];
 	bool arm_smmu;
@@ -653,10 +499,15 @@
 		return;
 	}
 
-	if (type != ADD_VLAN_IFACE &&
-	    type != DEL_VLAN_IFACE &&
-	    type != ADD_L2TP_VLAN_MAPPING &&
-		type != DEL_L2TP_VLAN_MAPPING) {
+	switch (type) {
+	case ADD_VLAN_IFACE:
+	case DEL_VLAN_IFACE:
+	case ADD_L2TP_VLAN_MAPPING:
+	case DEL_L2TP_VLAN_MAPPING:
+	case ADD_BRIDGE_VLAN_MAPPING:
+	case DEL_BRIDGE_VLAN_MAPPING:
+		break;
+	default:
 		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
 		return;
 	}
@@ -669,10 +520,17 @@
 	int retval;
 	struct ipa_ioc_vlan_iface_info *vlan_info;
 	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+	struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
 	struct ipa_msg_meta msg_meta;
+	void *buff;
 
-	if (msg_type == ADD_VLAN_IFACE ||
-		msg_type == DEL_VLAN_IFACE) {
+	IPADBG("type %d\n", msg_type);
+
+	memset(&msg_meta, 0, sizeof(msg_meta));
+	msg_meta.msg_type = msg_type;
+
+	if ((msg_type == ADD_VLAN_IFACE) ||
+		(msg_type == DEL_VLAN_IFACE)) {
 		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
 			GFP_KERNEL);
 		if (!vlan_info) {
@@ -686,18 +544,10 @@
 			return -EFAULT;
 		}
 
-		memset(&msg_meta, 0, sizeof(msg_meta));
-		msg_meta.msg_type = msg_type;
 		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
-		retval = ipa3_send_msg(&msg_meta, vlan_info,
-			ipa3_vlan_l2tp_msg_free_cb);
-		if (retval) {
-			IPAERR("ipa3_send_msg failed: %d\n", retval);
-			kfree(vlan_info);
-			return retval;
-		}
-	} else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
-		msg_type == DEL_L2TP_VLAN_MAPPING) {
+		buff = vlan_info;
+	} else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
+		(msg_type == DEL_L2TP_VLAN_MAPPING)) {
 		mapping_info = kzalloc(sizeof(struct
 			ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
 		if (!mapping_info) {
@@ -712,22 +562,46 @@
 			return -EFAULT;
 		}
 
-		memset(&msg_meta, 0, sizeof(msg_meta));
-		msg_meta.msg_type = msg_type;
 		msg_meta.msg_len = sizeof(struct
 			ipa_ioc_l2tp_vlan_mapping_info);
-		retval = ipa3_send_msg(&msg_meta, mapping_info,
-			ipa3_vlan_l2tp_msg_free_cb);
-		if (retval) {
-			IPAERR("ipa3_send_msg failed: %d\n", retval);
-			kfree(mapping_info);
-			return retval;
+		buff = mapping_info;
+	} else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
+		(msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
+		bridge_vlan_info = kzalloc(
+			sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
+			GFP_KERNEL);
+		if (!bridge_vlan_info) {
+			IPAERR("no memory\n");
+			return -ENOMEM;
 		}
+
+		if (copy_from_user((u8 *)bridge_vlan_info,
+			(void __user *)usr_param,
+			sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
+			kfree(bridge_vlan_info);
+			IPAERR("copy from user failed\n");
+			return -EFAULT;
+		}
+
+		msg_meta.msg_len = sizeof(struct
+			ipa_ioc_bridge_vlan_mapping_info);
+		buff = bridge_vlan_info;
 	} else {
 		IPAERR("Unexpected event\n");
 		return -EFAULT;
 	}
 
+	retval = ipa3_send_msg(&msg_meta, buff,
+		ipa3_vlan_l2tp_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+			retval,
+			msg_type);
+		kfree(buff);
+		return retval;
+	}
+	IPADBG("exit\n");
+
 	return 0;
 }
 
@@ -1880,7 +1754,18 @@
 			break;
 		}
 		break;
-
+	case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
 	case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
 		if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
 			retval = -EFAULT;
@@ -2587,9 +2472,12 @@
 		/* disable statuses for all modem controlled prod pipes */
 		if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
 			(ipa3_ctx->ep[ep_idx].valid &&
-			ipa3_ctx->ep[ep_idx].skip_ep_cfg)) {
+			ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
+			(ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
+			&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
 			ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
 
+			ipa3_ctx->ep[ep_idx].status.status_en = false;
 			reg_write.skip_pipeline_clear = false;
 			reg_write.pipeline_clear_options =
 				IPAHAL_HPS_CLEAR;
@@ -2661,6 +2549,8 @@
 	 */
 	ipa3_q6_pipe_delay(false);
 
+	ipa3_set_usb_prod_pipe_delay();
+
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	IPADBG_LOW("Exit with success\n");
 }
@@ -2735,6 +2625,16 @@
 	u32 *ipa_sram_mmio;
 	unsigned long phys_addr;
 
+	IPADBG(
+	    "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SRAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
+	    ipa3_ctx->ipa_wrapper_base,
+	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    ipahal_get_reg_n_ofst(
+		IPA_SRAM_DIRECT_ACCESS_n,
+		ipa3_ctx->smem_restricted_bytes / 4),
+	    ipa3_ctx->smem_restricted_bytes,
+	    ipa3_ctx->smem_sz);
+
 	phys_addr = ipa3_ctx->ipa_wrapper_base +
 		ipa3_ctx->ctrl->ipa_reg_base_ofst +
 		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
@@ -3207,21 +3107,27 @@
 	}
 	IPADBG("Apps to IPA cmd pipe is connected\n");
 
+	IPADBG("Will initialize SRAM\n");
 	ipa3_ctx->ctrl->ipa_init_sram();
 	IPADBG("SRAM initialized\n");
 
+	IPADBG("Will initialize HDR\n");
 	ipa3_ctx->ctrl->ipa_init_hdr();
 	IPADBG("HDR initialized\n");
 
+	IPADBG("Will initialize V4 RT\n");
 	ipa3_ctx->ctrl->ipa_init_rt4();
 	IPADBG("V4 RT initialized\n");
 
+	IPADBG("Will initialize V6 RT\n");
 	ipa3_ctx->ctrl->ipa_init_rt6();
 	IPADBG("V6 RT initialized\n");
 
+	IPADBG("Will initialize V4 FLT\n");
 	ipa3_ctx->ctrl->ipa_init_flt4();
 	IPADBG("V4 FLT initialized\n");
 
+	IPADBG("Will initialize V6 FLT\n");
 	ipa3_ctx->ctrl->ipa_init_flt6();
 	IPADBG("V6 FLT initialized\n");
 
@@ -4376,7 +4282,7 @@
 		IPAERR("uC panic handler failed %d\n", res);
 
 	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0)
-		ipahal_print_all_regs();
+		ipahal_print_all_regs(false);
 
 	return NOTIFY_DONE;
 }
@@ -4412,8 +4318,8 @@
 	int result;
 
 	result = gsi_configure_regs(ipa3_res.transport_mem_base,
-		ipa3_res.transport_mem_size,
-		ipa3_res.ipa_mem_base);
+				    ipa3_res.transport_mem_size,
+				    ipa3_res.ipa_mem_base);
 	if (result) {
 		IPAERR("Failed to configure GSI registers\n");
 		return -EINVAL;
@@ -4506,12 +4412,19 @@
 	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
 		ipa3_proxy_clk_vote();
 
-	/* SMMU was already attached if used, safe to do allocations */
-	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
-		ipa3_ctx->pdev)) {
-		IPAERR("fail to init ipahal\n");
-		result = -EFAULT;
-		goto fail_ipahal;
+	/*
+	 * SMMU was already attached if used, safe to do allocations
+	 *
+	 * NOTE WELL: On an emulation system, this allocation is done
+	 *            in ipa3_pre_init()
+	 */
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+				ipa3_ctx->pdev)) {
+			IPAERR("fail to init ipahal\n");
+			result = -EFAULT;
+			goto fail_ipahal;
+		}
 	}
 
 	result = ipa3_init_hw();
@@ -4646,9 +4559,18 @@
 	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
 	gsi_props.ee = resource_p->ee;
 	gsi_props.intr = GSI_INTR_IRQ;
-	gsi_props.irq = resource_p->transport_irq;
 	gsi_props.phys_addr = resource_p->transport_mem_base;
 	gsi_props.size = resource_p->transport_mem_size;
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		gsi_props.irq = resource_p->emulator_irq;
+		gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
+		gsi_props.emulator_intcntrlr_addr =
+		    resource_p->emulator_intcntrlr_mem_base;
+		gsi_props.emulator_intcntrlr_size =
+		    resource_p->emulator_intcntrlr_mem_size;
+	} else {
+		gsi_props.irq = resource_p->transport_irq;
+	}
 	gsi_props.notify_cb = ipa_gsi_notify_cb;
 	gsi_props.req_clk_cb = NULL;
 	gsi_props.rel_clk_cb = NULL;
@@ -4716,12 +4638,12 @@
 
 	ipa3_register_panic_hdlr();
 
+	ipa3_debugfs_init();
+
 	mutex_lock(&ipa3_ctx->lock);
 	ipa3_ctx->ipa_initialization_complete = true;
 	mutex_unlock(&ipa3_ctx->lock);
 
-	ipa3_debugfs_init();
-
 	ipa3_trigger_ipa_ready_cbs();
 	complete_all(&ipa3_ctx->init_completion_obj);
 	pr_info("IPA driver initialization was successful.\n");
@@ -4734,15 +4656,16 @@
 	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
 fail_register_device:
 	ipa3_destroy_flt_tbl_idrs();
-	ipa3_proxy_clk_unvote();
 fail_allok_pkt_init:
 	ipa3_nat_ipv6ct_destroy_devices();
 fail_nat_ipv6ct_init_dev:
 	ipa3_free_dma_task_for_gsi();
 fail_dma_task:
 fail_init_hw:
-	ipahal_destroy();
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION)
+		ipahal_destroy();
 fail_ipahal:
+	ipa3_proxy_clk_unvote();
 
 	return result;
 }
@@ -4751,10 +4674,24 @@
 {
 	int result;
 	const struct firmware *fw;
+	const char *path = IPA_FWS_PATH;
 
-	IPADBG("Manual FW loading process initiated\n");
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		switch (ipa3_get_emulation_type()) {
+		case IPA_HW_v3_5_1:
+			path = IPA_FWS_PATH_3_5_1;
+			break;
+		case IPA_HW_v4_0:
+			path = IPA_FWS_PATH_4_0;
+			break;
+		default:
+			break;
+		}
+	}
 
-	result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->cdev.dev);
+	IPADBG("Manual FW loading (%s) process initiated\n", path);
+
+	result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
 	if (result < 0) {
 		IPAERR("request_firmware failed, error %d\n", result);
 		return result;
@@ -4766,7 +4703,13 @@
 
 	IPADBG("FWs are available for loading\n");
 
-	result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		result = emulator_load_fws(fw,
+					   ipa3_res.transport_mem_base,
+					   ipa3_res.transport_mem_size);
+	} else {
+		result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
+	}
 	if (result) {
 		IPAERR("Manual IPA FWs loading has failed\n");
 		release_firmware(fw);
@@ -4785,6 +4728,7 @@
 	release_firmware(fw);
 
 	IPADBG("Manual FW loading process is complete\n");
+
 	return 0;
 }
 
@@ -4818,7 +4762,8 @@
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	if (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+	    (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
 		result = ipa3_pil_load_ipa_fws();
 	else
 		result = ipa3_manual_load_ipa_fws();
@@ -5096,6 +5041,7 @@
 	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
 	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
 	ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
+	ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
 	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
 	ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
@@ -5155,7 +5101,8 @@
 		goto fail_init_mem_partition;
 	}
 
-	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
+	    ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
 		ipa3_ctx->ctrl->msm_bus_data_ptr =
 			msm_bus_cl_get_pdata(ipa3_ctx->master_pdev);
 		if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) {
@@ -5205,6 +5152,28 @@
 		goto fail_remap;
 	}
 
+	IPADBG(
+	    "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
+	    resource_p->ipa_mem_base,
+	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    ipa3_ctx->mmio,
+	    resource_p->ipa_mem_size);
+
+	/*
+	 * Emulation requires ipahal to be initialized early, before the
+	 * FW download, hence the call here.
+	 */
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		if (ipahal_init(ipa3_ctx->ipa_hw_type,
+				ipa3_ctx->mmio,
+				&(ipa3_ctx->master_pdev->dev))) {
+			IPAERR("fail to init ipahal\n");
+			result = -EFAULT;
+			goto fail_ipahal_init;
+		}
+	}
+
 	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
 
 	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
@@ -5413,10 +5382,13 @@
 	 * We can't register the GSI driver yet, as it expects
 	 * the GSI FW to be up and running before the registration.
 	 *
-	 * For IPA3.0, the GSI configuration is done by the GSI driver.
+	 * For IPA3.0 and the emulation system, the GSI configuration
+	 * is done by the GSI driver.
+	 *
 	 * For IPA3.1 (and on), the GSI configuration is done by TZ.
 	 */
-	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
+	    ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
 		result = ipa3_gsi_pre_fw_load_init();
 		if (result) {
 			IPAERR("gsi pre FW loading config failed\n");
@@ -5494,6 +5466,9 @@
 fail_create_transport_wq:
 	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
 fail_init_hw:
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
+		ipahal_destroy();
+fail_ipahal_init:
 	iounmap(ipa3_ctx->mmio);
 fail_remap:
 	ipa3_disable_clks();
@@ -5806,6 +5781,7 @@
 			&ipa_drv_res->ee);
 	if (result)
 		ipa_drv_res->ee = 0;
+	IPADBG(":ee = %u\n", ipa_drv_res->ee);
 
 	ipa_drv_res->apply_rg10_wa =
 		of_property_read_bool(pdev->dev.of_node,
@@ -5818,7 +5794,7 @@
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,do-not-use-ch-gsi-20");
 	IPADBG(": GSI CH 20 WA is = %s\n",
-		ipa_drv_res->apply_rg10_wa
+		ipa_drv_res->gsi_ch20_wa
 		? "Needed" : "Not needed");
 
 	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
@@ -5897,6 +5873,32 @@
 		return result;
 	}
 
+	ipa_drv_res->wdi_over_pcie =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,wlan-ce-db-over-pcie");
+	IPADBG("Is wdi_over_pcie ? (%s)\n",
+		ipa_drv_res->wdi_over_pcie ? "Yes" : "No");
+
+	/*
+	 * If we're on emulator, get its interrupt controller's mem
+	 * start and size
+	 */
+	if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		resource = platform_get_resource_byname(
+		    pdev, IORESOURCE_MEM, "intctrl-base");
+		if (!resource) {
+			IPAERR(":Can't find intctrl-base resource\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->emulator_intcntrlr_mem_base =
+		    resource->start;
+		ipa_drv_res->emulator_intcntrlr_mem_size =
+		    resource_size(resource);
+		IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
+		       ipa_drv_res->emulator_intcntrlr_mem_base,
+		       ipa_drv_res->emulator_intcntrlr_mem_size);
+	}
+
 	return 0;
 }
 
@@ -6278,7 +6280,7 @@
 			iova_p, &pa_p, size_p);
 		ipa3_iommu_map(cb->mapping->domain,
 			iova_p, pa_p, size_p,
-			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+			IOMMU_READ | IOMMU_WRITE);
 	}
 
 
@@ -6691,5 +6693,216 @@
 	return 0;
 }
 
+/**************************************************************
+ *            PCIe Version
+ *************************************************************/
+
+int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct ipa3_plat_drv_res *ipa_drv_res;
+	u32 bar0_offset;
+	u32 mem_start;
+	u32 mem_end;
+	uint32_t bits;
+	uint32_t ipa_start, gsi_start, intctrl_start;
+	struct device *dev;
+	static struct platform_device platform_dev;
+
+	if (!pci_dev || !api_ctrl || !pdrv_match) {
+		IPAERR(
+		    "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
+		    pci_dev, api_ctrl, pdrv_match);
+		return -EOPNOTSUPP;
+	}
+
+	dev = &(pci_dev->dev);
+
+	IPADBG("IPA PCI driver probing started\n");
+
+	/*
+	 * Follow PCI driver flow here.
+	 * pci_enable_device:  Enables device and assigns resources
+	 * pci_request_region:  Makes BAR0 address region usable
+	 */
+	result = pci_enable_device(pci_dev);
+	if (result < 0) {
+		IPAERR("pci_enable_device() failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	result = pci_request_region(pci_dev, 0, "IPA Memory");
+	if (result < 0) {
+		IPAERR("pci_request_region() failed\n");
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * In the PCI/emulation environment, &platform_dev is passed to
+	 * get_ipa_dts_configuration() but is effectively unused: every
+	 * use of it inside that function is replaced by the C
+	 * preprocessor with definitions from ipa_emulation_stubs.h.
+	 * Passing &platform_dev keeps code validity tools happy.
+	 */
+	if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
+		IPAERR("get_ipa_dts_configuration() failed\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	ipa_drv_res = &ipa3_res;
+
+	result =
+		of_property_read_u32(NULL, "emulator-bar0-offset",
+				     &bar0_offset);
+	if (result) {
+		IPAERR(":get resource failed for emulator-bar0-offset!\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -ENODEV;
+	}
+	IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
+
+	ipa_start     = ipa_drv_res->ipa_mem_base;
+	gsi_start     = ipa_drv_res->transport_mem_base;
+	intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
+
+	/*
+	 * Record which interrupt line we will be signalled on.
+	 */
+	ipa_drv_res->emulator_irq = pci_dev->irq;
+	IPADBG(
+	    "EMULATION PCI_INTERRUPT_PIN(%u)\n",
+	    ipa_drv_res->emulator_irq);
+
+	/*
+	 * Set the ipa_mem_base to the PCI base address of BAR0
+	 */
+	mem_start = pci_resource_start(pci_dev, 0);
+	mem_end   = pci_resource_end(pci_dev, 0);
+
+	IPADBG("PCI START                = 0x%x\n", mem_start);
+	IPADBG("PCI END                  = 0x%x\n", mem_end);
+
+	ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+	ipa_drv_res->transport_mem_base =
+	    ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
+
+	ipa_drv_res->emulator_intcntrlr_mem_base =
+	    ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
+
+	IPADBG("ipa_mem_base                = 0x%x\n",
+	       ipa_drv_res->ipa_mem_base);
+	IPADBG("ipa_mem_size                = 0x%x\n",
+	       ipa_drv_res->ipa_mem_size);
+
+	IPADBG("transport_mem_base          = 0x%x\n",
+	       ipa_drv_res->transport_mem_base);
+	IPADBG("transport_mem_size          = 0x%x\n",
+	       ipa_drv_res->transport_mem_size);
+
+	IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
+	       ipa_drv_res->emulator_intcntrlr_mem_base);
+	IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
+	       ipa_drv_res->emulator_intcntrlr_mem_size);
+
+	result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
+	if (result != 0) {
+		IPAERR("ipa3_bind_api_controller() failed\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return result;
+	}
+
+	bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+		IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+		IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	pci_set_master(pci_dev);
+
+	memset(&platform_dev, 0, sizeof(platform_dev));
+	platform_dev.dev = *dev;
+
+	/* Proceed to real initialization */
+	result = ipa3_pre_init(&ipa3_res, &platform_dev);
+	if (result) {
+		IPAERR("ipa3_init failed\n");
+		pci_clear_master(pci_dev);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return result;
+	}
+
+	return result;
+}
+
+/*
+ * The following returns transport register memory location and
+ * size...
+ */
+int ipa3_get_transport_info(
+	phys_addr_t *phys_addr_ptr,
+	unsigned long *size_ptr)
+{
+	if (!phys_addr_ptr || !size_ptr) {
+		IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
+		       phys_addr_ptr, size_ptr);
+		return -EINVAL;
+	}
+
+	*phys_addr_ptr = ipa3_res.transport_mem_base;
+	*size_ptr      = ipa3_res.transport_mem_size;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_get_transport_info);
+
+static uint emulation_type = IPA_HW_v4_0;
+
+/*
+ * The following returns emulation type...
+ */
+uint ipa3_get_emulation_type(void)
+{
+	return emulation_type;
+}
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPA HW device driver");
+
+/*
+ * Module parameter. Invoke as follows:
+ *     insmod ipat.ko emulation_type=[13|14|...|N]
+ * Examples:
+ *   insmod ipat.ko emulation_type=13 # for IPA 3.5.1
+ *   insmod ipat.ko emulation_type=14 # for IPA 4.0
+ *
+ * NOTE: The emulation_type values must come from enum ipa_hw_type.
+ *
+ */
+
+module_param(emulation_type, uint, 0000);
+MODULE_PARM_DESC(
+	emulation_type,
+	"IPA emulation type (Use 13 for IPA 3.5.1, 14 for IPA 4.0)");
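The emulation_type parameter above selects the emulated IPA hardware generation at insmod time, defaulting to IPA_HW_v4_0. A minimal sketch of the same module-parameter mechanism in an otherwise empty module, assuming only what the patch's own comment states (13 for IPA 3.5.1, 14 for IPA 4.0); the demo_* names are illustrative:

#include <linux/module.h>
#include <linux/moduleparam.h>

static uint emulation_type = 14;	/* 14 == IPA 4.0 per the comment above */
module_param(emulation_type, uint, 0000);
MODULE_PARM_DESC(emulation_type,
	"Emulated IPA HW type (13 for 3.5.1, 14 for 4.0)");

static int __init demo_init(void)
{
	pr_info("demo: emulation_type=%u\n", emulation_type);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");

Loading it with insmod demo.ko emulation_type=13 overrides the default exactly as the insmod lines in the comment above do for ipat.ko.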
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index fe39440..5bcd49e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -635,6 +635,69 @@
 	return 0;
 }
 
+void ipa3_register_lock_unlock_callback(int (*client_cb)(bool is_lock),
+						u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return;
+	}
+
+	if (client_cb == NULL) {
+		IPAERR("Bad Param\n");
+		return;
+	}
+
+	ep->client_lock_unlock = client_cb;
+	IPADBG("exit\n");
+}
+
+void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return;
+	}
+
+	if (ep->client_lock_unlock == NULL) {
+		IPAERR("client_lock_unlock is already NULL\n");
+		return;
+	}
+
+	ep->client_lock_unlock = NULL;
+	IPADBG("exit\n");
+}
+
+static void client_lock_unlock_cb(u32 ipa_ep_idx, bool is_lock)
+{
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return;
+	}
+
+	if (ep->client_lock_unlock)
+		ep->client_lock_unlock(is_lock);
+
+	IPADBG("exit\n");
+}
 
 int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
 			     struct ipa_req_chan_out_params *out_params)
@@ -1259,6 +1322,46 @@
 	return result;
 }
 
+/*
+ * Set USB PROD pipe delay for MBIM/RMNET config.
+ * Clocks should be voted and locks should be taken
+ * before calling this API.
+ */
+
+void ipa3_set_usb_prod_pipe_delay(void)
+{
+	int result;
+	int pipe_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl;
+
+	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_ctrl.ipa_ep_delay = true;
+
+
+	pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+
+	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("client (%d) not valid\n", IPA_CLIENT_USB_PROD);
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[pipe_idx];
+
+	/* Setting delay on USB_PROD with skip_ep_cfg */
+	client_lock_unlock_cb(pipe_idx, true);
+	if (ep->valid && ep->skip_ep_cfg) {
+		ep->ep_delay_set = ep_ctrl.ipa_ep_delay;
+		result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed result=%d\n",
+				pipe_idx, result);
+		else
+			IPADBG("client (ep: %d) success\n", pipe_idx);
+	}
+	client_lock_unlock_cb(pipe_idx, false);
+}
+
 void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index dc5f5e0..10abdcf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -73,6 +73,8 @@
 	__stringify(DEL_L2TP_VLAN_MAPPING),
 	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
 	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+	__stringify(ADD_BRIDGE_VLAN_MAPPING),
+	__stringify(DEL_BRIDGE_VLAN_MAPPING),
 };
 
 const char *ipa3_hdr_l2_type_name[] = {
@@ -1950,7 +1952,7 @@
 		size_t count, loff_t *ppos)
 {
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	ipahal_print_all_regs();
+	ipahal_print_all_regs(true);
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 	return 0;
@@ -2291,6 +2293,13 @@
 		goto fail;
 	}
 
+	file = debugfs_create_u32("clk_rate", IPA_READ_ONLY_MODE,
+		dent, &ipa3_ctx->curr_ipa_clk_rate);
+	if (!file) {
+		IPAERR("could not create clk_rate file\n");
+		goto fail;
+	}
+
 	ipa_debugfs_init_stats(dent);
 
 	return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index e73349a..7603152 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -35,6 +35,9 @@
 #define IPA_GENERIC_AGGR_TIME_LIMIT 1
 #define IPA_GENERIC_AGGR_PKT_LIMIT 0
 
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_RX_BUFF_BASE_SZ 16384
+
 #define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
 #define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
 		(X) + NET_SKB_PAD) +\
@@ -1061,6 +1064,13 @@
 		goto fail_gen2;
 	}
 
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result,
+			ipa_ep_idx);
+		goto fail_gen2;
+	}
+
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
 
@@ -1110,11 +1120,6 @@
 		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
 	ipa3_disable_data_path(clnt_hdl);
-	if (ep->napi_enabled) {
-		do {
-			usleep_range(95, 105);
-		} while (atomic_read(&ep->sys->curr_polling_state));
-	}
 
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		do {
@@ -1128,9 +1133,6 @@
 		} while (1);
 	}
 
-	if (IPA_CLIENT_IS_CONS(ep->client))
-		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
-	flush_workqueue(ep->sys->wq);
 	/* channel stop might fail on timeout if IPA is busy */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1138,7 +1140,7 @@
 			break;
 
 		if (result != -GSI_STATUS_AGAIN &&
-		    result != -GSI_STATUS_TIMED_OUT)
+			result != -GSI_STATUS_TIMED_OUT)
 			break;
 	}
 
@@ -1147,6 +1149,17 @@
 		ipa_assert();
 		return result;
 	}
+
+	if (ep->napi_enabled) {
+		do {
+			usleep_range(95, 105);
+		} while (atomic_read(&ep->sys->curr_polling_state));
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+	flush_workqueue(ep->sys->wq);
+
 	result = ipa3_reset_gsi_channel(clnt_hdl);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("Failed to reset chan: %d.\n", result);
@@ -2933,7 +2946,6 @@
 			INIT_DELAYED_WORK(&sys->replenish_rx_work,
 				ipa3_replenish_rx_work_func);
 			atomic_set(&sys->curr_polling_state, 0);
-			sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
 			sys->rx_pool_sz = in->desc_fifo_sz /
 				IPA_FIFO_ELEMENT_SIZE - 1;
 			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
@@ -2941,8 +2953,23 @@
 			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
 			sys->get_skb = ipa3_get_skb_ipa_rx;
 			sys->free_skb = ipa3_free_skb_rx;
-			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
-			sys->repl_hdlr = ipa3_replenish_rx_cache;
+			/* recycle skb for GSB use case */
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+				sys->free_rx_wrapper =
+					ipa3_free_rx_wrapper;
+				sys->repl_hdlr =
+					ipa3_replenish_rx_cache;
+				/* Overwrite buffer size & aggr limit for GSB */
+				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+					IPA_GSB_RX_BUFF_BASE_SZ);
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+					IPA_GSB_AGGR_BYTE_LIMIT;
+			} else {
+				sys->free_rx_wrapper =
+					ipa3_free_rx_wrapper;
+				sys->repl_hdlr = ipa3_replenish_rx_cache;
+				sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+			}
 		} else if (in->client ==
 				IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
 			IPADBG("assigning policy to client:%d",
@@ -3681,8 +3708,13 @@
 	ep->gsi_mem_info.chan_ring_base_vaddr =
 		gsi_channel_props.ring_base_vaddr;
 
-	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
 	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.prefetch_mode =
+		ipa_get_ep_prefetch_mode(ep->client);
 	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
 		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
 	else
@@ -3716,15 +3748,11 @@
 		goto fail_write_channel_scratch;
 	}
 
-	result = gsi_start_channel(ep->gsi_chan_hdl);
-	if (result != GSI_STATUS_SUCCESS)
-		goto fail_start_channel;
 	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
 		gsi_config_channel_mode(ep->gsi_chan_hdl,
 				GSI_CHAN_MODE_POLL);
 	return 0;
 
-fail_start_channel:
 fail_write_channel_scratch:
 	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
 		!= GSI_STATUS_SUCCESS) {
@@ -3921,7 +3949,12 @@
 		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
 		&dma_addr, 0);
 	gsi_channel_props.ring_base_addr = dma_addr;
-	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+
 	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
 	gsi_channel_props.low_weight = 1;
 	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c
new file mode 100644
index 0000000..5fe2294
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c
@@ -0,0 +1,765 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/msm_ipa.h>
+#include "ipa_i.h"
+#include "ipa_emulation_stubs.h"
+
+# undef strsame
+# define strsame(x, y) \
+	(!strcmp((x), (y)))
+
+/*
+ * The following enum values used to index tables below.
+ */
+enum dtsi_index_e {
+	DTSI_INDEX_3_5_1 = 0,
+	DTSI_INDEX_4_0   = 1,
+};
+
+struct dtsi_replacement_u32 {
+	char *key;
+	u32 value;
+};
+
+struct dtsi_replacement_u32_table {
+	struct dtsi_replacement_u32 *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_bool {
+	char *key;
+	bool value;
+};
+
+struct dtsi_replacement_bool_table {
+	struct dtsi_replacement_bool *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_u32_array {
+	char *key;
+	u32 *p_value;
+	u32 num_elements;
+};
+
+struct dtsi_replacement_u32_array_table {
+	struct dtsi_replacement_u32_array *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_resource_table {
+	struct resource *p_table;
+	u32 num_entries;
+};
+
+/*
+ * Any of the data below with _4_0 in the name represent data taken
+ * from the 4.0 dtsi file.
+ *
+ * Any of the data below with _3_5_1 in the name represent data taken
+ * from the 3.5.1 dtsi file.
+ */
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       true},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         false},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          true},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     false},
+};
+
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       true},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         true},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          false},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     false},
+};
+
+static struct dtsi_replacement_bool_table
+ipa3_plat_drv_bool_table[] = {
+	{ ipa3_plat_drv_bool_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) },
+	{ ipa3_plat_drv_bool_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_4_0) },
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v4_0},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"emulator-bar0-offset",                0x01C00000},
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v3_5_1},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"emulator-bar0-offset",                0x01C00000},
+};
+
+static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
+	{ ipa3_plat_drv_u32_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) },
+	{ ipa3_plat_drv_u32_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_4_0) },
+};
+
+static u32 mhi_event_ring_id_limits_array_4_0[] = {
+	9, 10
+};
+
+static u32 mhi_event_ring_id_limits_array_3_5_1[] = {
+	IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END
+};
+
+static u32 ipa_tz_unlock_reg_array_4_0[] = {
+	0x04043583c, 0x00001000
+};
+
+static u32 ipa_tz_unlock_reg_array_3_5_1[] = {
+	0x04043583c, 0x00001000
+};
+
+static u32 ipa_ram_mmap_array_4_0[] = {
+	0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078,
+	0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388,
+	0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000,
+	0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E,
+	0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078,
+	0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008,
+	0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608,
+	0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8,
+	0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0,
+	0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x000013F0,
+	0x0000100C, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000,
+	0x000023FC, 0x00000000, 0x000023FC, 0x00000000, 0x00000080,
+	0x00000200, 0x00002800, 0x000023FC, 0x00000000, 0x000023FC,
+	0x00000000, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000,
+	0x00002400, 0x00000400, 0x00000BD8, 0x00000050, 0x00000C30,
+	0x00000060, 0x00000C90, 0x00000140, 0x00000DD0, 0x00000180,
+	0x00000F50, 0x00000180, 0x000010D0, 0x00000180, 0x00001250,
+	0x00000180, 0x000013D0, 0x00000020
+};
+
+static u32 ipa_ram_mmap_array_3_5_1[] = {
+	0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078,
+	0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388,
+	0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000,
+	0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E,
+	0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078,
+	0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008,
+	0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608,
+	0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8,
+	0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0,
+	0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x00000BD8,
+	0x00001024, 0x00002000, 0x00000000, 0x00002000, 0x00000000,
+	0x00002000, 0x00000000, 0x00002000, 0x00000000, 0x00000080,
+	0x00000200, 0x00002000, 0x00002000, 0x00000000, 0x00002000,
+	0x00000000, 0x00002000, 0x00000000, 0x00002000, 0x00000000,
+	0x00001C00, 0x00000400
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_4_0,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_4_0,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) },
+	{"qcom,ipa-ram-mmap",
+	 ipa_ram_mmap_array_4_0,
+	 ARRAY_SIZE(ipa_ram_mmap_array_4_0) },
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_3_5_1,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_3_5_1,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) },
+	{"qcom,ipa-ram-mmap",
+	 ipa_ram_mmap_array_3_5_1,
+	 ARRAY_SIZE(ipa_ram_mmap_array_3_5_1) },
+};
+
+struct dtsi_replacement_u32_array_table
+ipa3_plat_drv_u32_array_table[] = {
+	{ ipa3_plat_drv_u32_array_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) },
+	{ ipa3_plat_drv_u32_array_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) },
+};
+
+#define INTCTRL_OFFSET       0x083C0000
+#define INTCTRL_SIZE         0x00000110
+
+#define IPA_BASE_OFFSET_4_0  0x01e00000
+#define IPA_BASE_SIZE_4_0    0x00034000
+#define GSI_BASE_OFFSET_4_0  0x01e04000
+#define GSI_BASE_SIZE_4_0    0x00028000
+
+struct resource ipa3_plat_drv_resource_4_0[] = {
+	/*
+	 * PLEASE NOTE WELL: The following offset values
+	 * ("ipa-base", "gsi-base", and "intctrl-base") are used to
+	 * calculate offsets relative to the PCI BAR0 address provided
+	 * by the PCI probe.  After their use to calculate the
+	 * offsets, they are not used again, since PCI ultimately
+	 * dictates where things live.
+	 */
+	{
+		IPA_BASE_OFFSET_4_0,
+		(IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_4_0,
+		(GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+#define IPA_BASE_OFFSET_3_5_1  0x01e00000
+#define IPA_BASE_SIZE_3_5_1    0x00034000
+#define GSI_BASE_OFFSET_3_5_1  0x01e04000
+#define GSI_BASE_SIZE_3_5_1    0x0002c000
+
+struct resource ipa3_plat_drv_resource_3_5_1[] = {
+	/*
+	 * PLEASE NOTE WELL: The following offset values
+	 * ("ipa-base", "gsi-base", and "intctrl-base") are used to
+	 * calculate offsets relative to the PCI BAR0 address provided
+	 * by the PCI probe.  After their use to calculate the
+	 * offsets, they are not used again, since PCI ultimately
+	 * dictates where things live.
+	 */
+	{
+		IPA_BASE_OFFSET_3_5_1,
+		(IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_3_5_1,
+		(GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+struct dtsi_replacement_resource_table
+ipa3_plat_drv_resource_table[] = {
+	{ ipa3_plat_drv_resource_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) },
+	{ ipa3_plat_drv_resource_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_4_0) },
+};
+
+/*
+ * The following code uses the data above...
+ */
+static u32 emulator_type_to_index(void)
+{
+	/*
+	 * Use the input parameter to the IPA driver loadable module,
+	 * which specifies the type of hardware the driver is running
+	 * on.
+	 */
+	u32 index = DTSI_INDEX_4_0;
+	uint emulation_type = ipa3_get_emulation_type();
+
+	switch (emulation_type) {
+	case IPA_HW_v3_5_1:
+		index = DTSI_INDEX_3_5_1;
+		break;
+	case IPA_HW_v4_0:
+		index = DTSI_INDEX_4_0;
+		break;
+	default:
+		break;
+	}
+
+	IPADBG("emulation_type(%u) emulation_index(%u)\n",
+	       emulation_type, index);
+
+	return index;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_bool - find whether a property exists
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+bool emulator_of_property_read_bool(
+	const struct device_node *np,
+	const char *propname)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_bool *ipa3_plat_drv_boolP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_bool_table for index %u\n",
+		    index);
+		return false;
+	}
+
+	ipa3_plat_drv_boolP =
+	    ipa3_plat_drv_bool_table[index].p_table;
+
+	for (i = 0;
+	     i < ipa3_plat_drv_bool_table[index].num_entries;
+	     i++) {
+		if (strsame(ipa3_plat_drv_boolP[i].key, propname)) {
+			IPADBG(
+			    "Found value %u for propname %s index %u\n",
+			    ipa3_plat_drv_boolP[i].value,
+			    propname,
+			    index);
+			return ipa3_plat_drv_boolP[i].value;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return false;
+}
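
A hedged illustration of the lookup behavior, with values taken directly from the ipa3_plat_drv_bool_4_0 / ipa3_plat_drv_bool_3_5_1 tables above (the calling code is not shown in this patch):

	/* Illustration only, not part of the patch */
	bool wdi2 = emulator_of_property_read_bool(NULL, "qcom,ipa-wdi2");
	/* true for both the 3.5.1 and the 4.0 tables */
	bool tfc  = emulator_of_property_read_bool(NULL, "qcom,tethered-flow-control");
	/* true when emulating IPA 4.0, false when emulating 3.5.1 */
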
+
+/* From include/linux/of.h */
+int emulator_of_property_read_u32(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_value)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32 *ipa3_plat_drv_u32P;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_table for index %u\n",
+		    index);
+		return -EINVAL;
+	}
+
+	ipa3_plat_drv_u32P =
+	    ipa3_plat_drv_u32_table[index].p_table;
+
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_table[index].num_entries;
+	     i++) {
+		if (strsame(ipa3_plat_drv_u32P[i].key, propname)) {
+			*out_value = ipa3_plat_drv_u32P[i].value;
+			IPADBG(
+			    "Found value %u for propname %s index %u\n",
+			    ipa3_plat_drv_u32P[i].value,
+			    propname,
+			    index);
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz:         number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32_array *u32_arrayP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+		    index);
+		return -EINVAL;
+	}
+
+	u32_arrayP =
+		ipa3_plat_drv_u32_array_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+	     i++) {
+		if (strsame(
+			u32_arrayP[i].key, propname)) {
+			u32 num_elements =
+			    u32_arrayP[i].num_elements;
+			u32 *p_element =
+			    &u32_arrayP[i].p_value[0];
+			size_t j = 0;
+
+			if (num_elements > sz) {
+				IPAERR(
+				    "Found array of %u values for propname %s; only room for %u elements in copy buffer\n",
+				    num_elements,
+				    propname,
+				    (unsigned int) sz);
+				return -EOVERFLOW;
+			}
+
+			while (j++ < num_elements)
+				*out_values++ = *p_element++;
+
+			IPADBG(
+			    "Found array of values starting with %u for propname %s index %u\n",
+			    u32_arrayP[i].p_value[0],
+			    propname,
+			    index);
+
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
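
A short example of the array read, grounded in the tables above (sz must be at least the number of stored elements, otherwise -EOVERFLOW is returned):

	/* Illustration only, not part of the patch */
	u32 limits[2];

	if (!emulator_of_property_read_u32_array(NULL,
			"qcom,mhi-event-ring-id-limits", limits, 2)) {
		/* 4.0 tables: limits = {9, 10};
		 * 3.5.1 tables: {IPA_MHI_GSI_EVENT_RING_ID_START, _END} = {10, 12} */
	}
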
+
+/* From drivers/base/platform.c */
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device by name
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name)
+{
+	u16 i;
+	u32 index;
+	struct resource *ipa3_plat_drv_resourceP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_resource_table for index %u\n",
+		    index);
+		return NULL;
+	}
+
+	ipa3_plat_drv_resourceP =
+		ipa3_plat_drv_resource_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_resource_table[index].num_entries;
+	     i++) {
+		struct resource *r = &ipa3_plat_drv_resourceP[i];
+
+		if (type == resource_type(r) && strsame(r->name, name)) {
+			IPADBG(
+			    "Found start 0x%x size %u for name %s index %u\n",
+			    (unsigned int) (r->start),
+			    (unsigned int) (resource_size(r)),
+			    name,
+			    index);
+			return r;
+		}
+	}
+
+	IPADBG("Did not find match for name %s index %u\n",
+	       name,
+	       index);
+
+	return NULL;
+}
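
For example (illustration only, with values from ipa3_plat_drv_resource_4_0 above), the probe can ask for the IPA register window by name; per the "PLEASE NOTE WELL" comment, the returned start is an offset that is later added to the PCI BAR0 base:

	struct resource *res = emulator_platform_get_resource_byname(NULL,
			IORESOURCE_MEM, "ipa-base");
	/* when emulating IPA 4.0: res->start == 0x01e00000 (IPA_BASE_OFFSET_4_0),
	 * resource_size(res) == 0x00034000 via emulator_resource_size() */
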
+
+/* From drivers/of/base.c */
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np:         device node from which the property value is to
+ *              be read. Not used.
+ * @propname:   name of the property to be searched.
+ * @elem_size:  size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size)
+{
+	u32 index;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+
+	/*
+	 * Use elem_size to determine which table to search for the
+	 * specified property name
+	 */
+	if (elem_size == sizeof(u32)) {
+		u16 i;
+		struct dtsi_replacement_u32_array *u32_arrayP;
+
+		if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+			IPADBG(
+			    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+			    index);
+			return -EINVAL;
+		}
+
+		u32_arrayP =
+			ipa3_plat_drv_u32_array_table[index].p_table;
+
+		for (i = 0;
+		     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+		     i++) {
+			if (strsame(u32_arrayP[i].key, propname)) {
+				if (u32_arrayP[i].p_value == NULL) {
+					IPADBG(
+					    "Found no elements for propname %s index %u\n",
+					    propname,
+					    index);
+					return -ENODATA;
+				}
+
+				IPADBG(
+				    "Found %u elements for propname %s index %u\n",
+				    u32_arrayP[i].num_elements,
+				    propname,
+				    index);
+
+				return u32_arrayP[i].num_elements;
+			}
+		}
+
+		IPADBG(
+		    "Found no match in table with elem_size %d for propname %s index %u\n",
+		    elem_size,
+		    propname,
+		    index);
+
+		return -EINVAL;
+	}
+
+	IPAERR(
+	    "Found no tables with element size %u to search for propname %s index %u\n",
+	    elem_size,
+	    propname,
+	    index);
+
+	return -EINVAL;
+}
+
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max)
+{
+	return emulator_of_property_read_u32_array(
+	    np, propname, out_values, sz_max);
+}
+
+resource_size_t emulator_resource_size(const struct resource *res)
+{
+	return res->end - res->start;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h
new file mode 100644
index 0000000..cf4c7c9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_IPA_EMULATION_STUBS_H_)
+# define _IPA_EMULATION_STUBS_H_
+
+# define clk_get(x, y) ((struct clk *) -(MAX_ERRNO+1))
+# define clk_put(x)                do { } while (0)
+# define clk_prepare(x)            do { } while (0)
+# define clk_enable(x)             do { } while (0)
+# define clk_set_rate(x, y)        do { } while (0)
+# define clk_disable_unprepare(x)  do { } while (0)
+
+# define outer_flush_range(x, y)
+# define __flush_dcache_area(x, y)
+# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y)
+
+/* Point several API calls to these new EMULATION functions */
+# define of_property_read_bool(np, propname)	 \
+	emulator_of_property_read_bool(NULL, propname)
+# define of_property_read_u32(np, propname, out_value)   \
+	emulator_of_property_read_u32(NULL, propname, out_value)
+# define of_property_read_u32_array(np, propname, out_values, sz)	\
+	emulator_of_property_read_u32_array(NULL, propname, out_values, sz)
+# define platform_get_resource_byname(dev, type, name) \
+	emulator_platform_get_resource_byname(NULL, type, name)
+# define of_property_count_elems_of_size(np, propname, elem_size) \
+	emulator_of_property_count_elems_of_size(NULL, propname, elem_size)
+# define of_property_read_variable_u32_array( \
+	np, propname, out_values, sz_min, sz_max) \
+	emulator_of_property_read_variable_u32_array( \
+		NULL, propname, out_values, sz_min, sz_max)
+# define resource_size(res) \
+	emulator_resource_size(res)
+
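
To make the redirection concrete (a hedged, generic illustration rather than a specific call site in the driver): with this header included, an ordinary device-tree read such as

	of_property_read_u32(node, "qcom,ipa-hw-mode", &hw_mode);

is rewritten by the preprocessor into

	emulator_of_property_read_u32(NULL, "qcom,ipa-hw-mode", &hw_mode);

which resolves against the replacement tables in ipa_dtsi_replacement.c (both the 3.5.1 and 4.0 u32 tables map "qcom,ipa-hw-mode" to 3, i.e. IPA_HW_MODE_EMULATION).
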
+/**
+ * emulator_of_property_read_bool - find whether a property exists
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+bool emulator_of_property_read_bool(
+	const struct device_node *np,
+	const char *propname);
+
+int emulator_of_property_read_u32(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_value);
+
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz:         number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz);
+
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device
+ * by name
+ *
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name);
+
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ * @elem_size:  size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size);
+
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max);
+
+resource_size_t emulator_resource_size(
+	const struct resource *res);
+
+#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 6742773..b090151 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1014,41 +1014,12 @@
 		goto error;
 	}
 
+	if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip))
+		goto error;
+
 	if (entry->rt_tbl)
 		entry->rt_tbl->ref_cnt--;
 
-	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
-		if (!frule->rule.eq_attrib_type) {
-			if (!frule->rule.rt_tbl_hdl) {
-				IPAERR_RL("invalid RT tbl\n");
-				goto error;
-			}
-
-			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
-			if (rt_tbl == NULL) {
-				IPAERR_RL("RT tbl not found\n");
-				goto error;
-			}
-
-			if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
-				IPAERR_RL("RT table cookie is invalid\n");
-				goto error;
-			}
-		} else {
-			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
-				IPA_MEM_PART(v4_modem_rt_index_hi) :
-				IPA_MEM_PART(v6_modem_rt_index_hi))) {
-				IPAERR_RL("invalid RT tbl\n");
-				goto error;
-			}
-		}
-	} else {
-		if (frule->rule.rt_tbl_idx > 0) {
-			IPAERR_RL("invalid RT tbl\n");
-			goto error;
-		}
-	}
-
 	entry->rule = frule->rule;
 	entry->rt_tbl = rt_tbl;
 	if (entry->rt_tbl)
@@ -1447,8 +1418,16 @@
 			}
 		}
 	}
-	mutex_unlock(&ipa3_ctx->lock);
 
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) ||
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) {
+		IPAERR("fail to commit flt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EPERM;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index cecbef0..4eb8edb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -996,8 +996,9 @@
 	struct ipa_hdr_offset_entry *off_next;
 	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
 	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
-	int i, end = 0;
-	bool user_rule = false;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+	struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
+	int i;
 
 	/*
 	 * issue a reset on the routing module since routing rules point to
@@ -1035,9 +1036,6 @@
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only || entry->ipacm_installed) {
 			if (entry->is_hdr_proc_ctx) {
 				dma_unmap_single(ipa3_ctx->pdev,
@@ -1045,9 +1043,15 @@
 					entry->hdr_len,
 					DMA_TO_DEVICE);
 				entry->proc_ctx = NULL;
+			} else {
+				/* move the offset entry to free list */
+				entry->offset_entry->ipacm_installed = 0;
+				list_move(&entry->offset_entry->link,
+				&htbl->head_free_offset_list[
+					entry->offset_entry->bin]);
 			}
 			list_del(&entry->link);
-			ipa3_ctx->hdr_tbl.hdr_cnt--;
+			htbl->hdr_cnt--;
 			entry->ref_cnt = 0;
 			entry->cookie = 0;
 
@@ -1056,53 +1060,37 @@
 			kmem_cache_free(ipa3_ctx->hdr_cache, entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
-		list_for_each_entry_safe(off_entry, off_next,
+
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+			list_for_each_entry_safe(off_entry, off_next,
 					 &ipa3_ctx->hdr_tbl.head_offset_list[i],
 					 link) {
-
-			/*
-			 * do not remove the default exception header which is
-			 * at offset 0
-			 */
-			if (off_entry->offset == 0)
-				continue;
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
+				/**
+				 * do not remove the default exception
+				 * header which is at offset 0
+				 */
+				if (off_entry->offset == 0)
+					continue;
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
 					off_entry);
-			} else {
-				if (off_entry->offset +
-					ipa_hdr_bin_sz[off_entry->bin] > end) {
-					end = off_entry->offset +
-						ipa_hdr_bin_sz[off_entry->bin];
-					IPADBG("replace end = %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(off_entry, off_next,
+			list_for_each_entry_safe(off_entry, off_next,
 				&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					off_entry->ipacm_installed) {
 				list_del(&off_entry->link);
 				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
 					off_entry);
 			}
 		}
+		/* there is one header of size 8 */
+		ipa3_ctx->hdr_tbl.end = 8;
+		ipa3_ctx->hdr_tbl.hdr_cnt = 1;
 	}
 
-	IPADBG("hdr_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa3_ctx->hdr_tbl.end = end;
-		IPADBG("hdr_tbl.end = %d\n", end);
-	}
 	IPADBG("reset hdr proc ctx\n");
-	user_rule = false;
-	end = 0;
 	list_for_each_entry_safe(
 		ctx_entry,
 		ctx_next,
@@ -1115,13 +1103,14 @@
 			return -EFAULT;
 		}
 
-		if (entry->ipacm_installed)
-			user_rule = true;
-
 		if (!user_only ||
 				ctx_entry->ipacm_installed) {
+			/* move the offset entry to appropriate free list */
+			list_move(&ctx_entry->offset_entry->link,
+				&htbl_proc->head_free_offset_list[
+					ctx_entry->offset_entry->bin]);
 			list_del(&ctx_entry->link);
-			ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt--;
+			htbl_proc->proc_ctx_cnt--;
 			ctx_entry->ref_cnt = 0;
 			ctx_entry->cookie = 0;
 
@@ -1131,48 +1120,39 @@
 				ctx_entry);
 		}
 	}
-	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
 				&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
 				link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa3_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
-			} else {
-				if (ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin]
-					> end) {
-					end = ctx_off_entry->offset +
-					ipa_hdr_bin_sz[ctx_off_entry->bin];
-					IPADBG("replace hdr_proc as %d\n", end);
-				}
 			}
-		}
-		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
-			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
-			link) {
-
-			if (!user_only ||
-					ctx_off_entry->ipacm_installed) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&ipa3_ctx->hdr_proc_ctx_tbl.
+				head_free_offset_list[i], link) {
 				list_del(&ctx_off_entry->link);
 				kmem_cache_free(
 					ipa3_ctx->hdr_proc_ctx_offset_cache,
 					ctx_off_entry);
 			}
 		}
+		ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
+		ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
 	}
 
-	IPADBG("hdr_proc_tbl.end = %d\n", end);
-	if (user_rule) {
-		ipa3_ctx->hdr_proc_ctx_tbl.end = end;
-		IPADBG("hdr_proc_tbl.end = %d\n", end);
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EFAULT;
 	}
+
 	mutex_unlock(&ipa3_ctx->lock);
-
 	return 0;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 56b5740..43f4c74 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -203,6 +203,188 @@
 #define IPA_WDI_TX_DB_RES			7
 #define IPA_WDI_MAX_RES				8
 
+#ifdef CONFIG_ARM64
+/* Outer caches unsupported on ARM64 platforms */
+# define outer_flush_range(x, y)
+# define __cpuc_flush_dcache_area __flush_dcache_area
+#endif
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+			       x == IPA_MODE_MOBILE_AP_WAN || \
+			       x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 500
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_MHI_GSI_EVENT_RING_ID_START 10
+#define IPA_MHI_GSI_EVENT_RING_ID_END 12
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
+#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
+#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
+
+/* round addresses to the closest page per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+	do { \
+		(iova_p) = rounddown((iova), PAGE_SIZE); \
+		(pa_p) = rounddown((pa), PAGE_SIZE); \
+		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+	} while (0)
+
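
A worked example of the page rounding, as a small standalone program (assuming the common PAGE_SIZE of 4096 and re-creating the kernel's rounddown/roundup helpers so it builds in userspace):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL                        /* assumption for the example */
	#define rounddown(x, y) (((x) / (y)) * (y))
	#define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		unsigned long iova = 0x12345678, pa = 0x9abcd123, size = 0x2000;
		unsigned long iova_p, pa_p, size_p;

		/* same arithmetic as IPA_SMMU_ROUND_TO_PAGE */
		iova_p = rounddown(iova, PAGE_SIZE);
		pa_p   = rounddown(pa, PAGE_SIZE);
		size_p = roundup(size + pa - pa_p, PAGE_SIZE);

		/* prints: iova_p=0x12345000 pa_p=0x9abcd000 size_p=0x3000 */
		printf("iova_p=0x%lx pa_p=0x%lx size_p=0x%lx\n", iova_p, pa_p, size_p);
		return 0;
	}
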
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH         "ipa/ipa_fws.elf"
+/*
+ * The following paths are used when building the system for the
+ * emulation environment or when IPA_EMULATION_COMPILE == 1.
+ *
+ * As new hardware platforms are added into the emulation environment,
+ * please add the appropriate paths here for their firmwares.
+ */
+#define IPA_FWS_PATH_4_0     "ipa/4.0/ipa_fws.elf"
+#define IPA_FWS_PATH_3_5_1   "ipa/3.5.1/ipa_fws.elf"
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_INIT_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_TABLE_DMA_CMD, \
+				compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_NAT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_MODIFY_PDN, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_FLT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_RT_RULE, \
+				compat_uptr_t)
+#endif /* #ifdef CONFIG_COMPAT */
+
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+#define TZ_MEM_PROTECT_REGION_ID 0x10
+
 struct ipa3_active_client_htable_entry {
 	struct hlist_node list;
 	char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
@@ -558,6 +740,8 @@
  * @qmi_request_sent: Indicates whether QMI request to enable clear data path
  *					request is sent or not.
  * @napi_enabled: when true, IPA call client callback to start polling
+ * @client_lock_unlock: callback function to take mutex lock/unlock for USB
+ *				clients
  */
 struct ipa3_ep_context {
 	int valid;
@@ -590,6 +774,8 @@
 	u32 eot_in_poll_err;
 	bool ep_delay_set;
 
+	int (*client_lock_unlock)(bool is_lock);
+
 	/* sys MUST be the last element of this struct */
 	struct ipa3_sys_context *sys;
 };
@@ -912,11 +1098,13 @@
  * @IPA_HW_Normal: Regular IPA hardware
  * @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation
  * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge
+ * @IPA_HW_Emulation: IPA emulation hardware
  */
 enum ipa3_hw_mode {
-	IPA_HW_MODE_NORMAL  = 0,
-	IPA_HW_MODE_VIRTUAL = 1,
-	IPA_HW_MODE_PCIE    = 2
+	IPA_HW_MODE_NORMAL    = 0,
+	IPA_HW_MODE_VIRTUAL   = 1,
+	IPA_HW_MODE_PCIE      = 2,
+	IPA_HW_MODE_EMULATION = 3,
 };
 
 enum ipa3_config_this_ep {
@@ -1207,6 +1395,7 @@
  * @mode: IPA operating mode
  * @mmio: iomem
  * @ipa_wrapper_base: IPA wrapper base address
+ * @ipa_wrapper_size: size of the memory pointed to by ipa_wrapper_base
  * @hdr_tbl: IPA header table
  * @hdr_proc_ctx_tbl: IPA processing context table
  * @rt_tbl_set: list of routing tables each of which is a list of rules
@@ -1415,6 +1604,7 @@
 	struct mutex ipa_cne_evt_lock;
 	bool use_ipa_pm;
 	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
+	bool wdi_over_pcie;
 };
 
 struct ipa3_plat_drv_res {
@@ -1423,6 +1613,9 @@
 	u32 ipa_mem_size;
 	u32 transport_mem_base;
 	u32 transport_mem_size;
+	u32 emulator_intcntrlr_mem_base;
+	u32 emulator_intcntrlr_mem_size;
+	u32 emulator_irq;
 	u32 ipa_irq;
 	u32 transport_irq;
 	u32 ipa_pipe_mem_start_ofst;
@@ -1446,6 +1639,7 @@
 	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
 	bool use_ipa_pm;
 	struct ipa_pm_init_params pm_init;
+	bool wdi_over_pcie;
 };
 
 /**
@@ -1726,6 +1920,9 @@
 int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
 
 void ipa3_xdci_ep_delay_rm(u32 clnt_hdl);
+void ipa3_register_lock_unlock_callback(int (*client_cb)(bool), u32 ipa_ep_idx);
+void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx);
+void ipa3_set_usb_prod_pipe_delay(void);
 
 int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	bool should_force_clear, u32 qmi_req_id, bool is_dpl);
@@ -2063,6 +2260,8 @@
  */
 int ipa3_get_ep_mapping(enum ipa_client_type client);
 
+enum gsi_prefetch_mode ipa_get_ep_prefetch_mode(enum ipa_client_type client);
+
 bool ipa3_is_ready(void);
 
 void ipa3_proxy_clk_vote(void);
@@ -2355,6 +2554,10 @@
 void ipa3_inc_acquire_wakelock(void);
 void ipa3_dec_release_wakelock(void);
 int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base);
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size);
 int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
 const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
 int ipa_gsi_ch20_wa(void);
@@ -2381,4 +2584,9 @@
 void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
 	struct ipahal_imm_cmd_pyld *cmd_pyld);
 int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
+uint ipa3_get_emulation_type(void);
+int ipa3_get_transport_info(
+	phys_addr_t *phys_addr_ptr,
+	unsigned long *size_ptr);
+irq_handler_t ipa3_get_isr(void);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index d69d6ae..46b7434 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -339,6 +339,12 @@
 	ipa3_dec_client_disable_clks_no_block(&log_info);
 	return IRQ_HANDLED;
 }
+
+irq_handler_t ipa3_get_isr(void)
+{
+	return ipa3_isr;
+}
+
 /**
 * ipa3_add_interrupt_handler() - Adds handler to an interrupt type
 * @interrupt:		Interrupt type
@@ -486,21 +492,35 @@
 		return -ENOMEM;
 	}
 
-	res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
-				IRQF_TRIGGER_RISING, "ipa", ipa_dev);
-	if (res) {
-		IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
-		return -ENODEV;
+	/*
+	 * NOTE:
+	 *
+	 *  We only register an isr on non-emulator (i.e. real UE)
+	 *  systems.
+	 *
+	 *  On the emulator, emulator_soft_irq_isr() calls ipa3_isr,
+	 *  so no isr is registered here; instead, we pass the address
+	 *  of ipa3_isr to the gsi layer, where emulator interrupts
+	 *  are handled.
+	 */
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+					IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+		if (res) {
+			IPAERR(
+			    "fail to register IPA IRQ handler irq=%d\n",
+			    ipa_irq);
+			return -ENODEV;
+		}
+		IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+		res = enable_irq_wake(ipa_irq);
+		if (res)
+			IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+				   ipa_irq, res);
+		else
+			IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
 	}
-	IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
-
-	res = enable_irq_wake(ipa_irq);
-	if (res)
-		IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
-				ipa_irq, res);
-	else
-		IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
-
 	spin_lock_init(&suspend_wa_lock);
 	return 0;
 }
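
A hedged sketch of the emulator-side hookup this enables; the function body below is hypothetical (only ipa3_get_isr()/ipa3_isr come from this patch, and emulator_soft_irq_isr is named only in the comment above):

	static irqreturn_t emulator_soft_irq_isr(int irq, void *ctxt)
	{
		irq_handler_t ipa_isr = ipa3_get_isr();   /* returns ipa3_isr */

		return ipa_isr(irq, ctxt);                /* forward to the IPA handler */
	}
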
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 82cd8187..dd97038 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -56,9 +56,9 @@
 
 
 #define IPA_MHI_FUNC_ENTRY() \
-	IPA_MHI_DBG_LOW("ENTRY\n")
+	IPA_MHI_DBG("ENTRY\n")
 #define IPA_MHI_FUNC_EXIT() \
-	IPA_MHI_DBG_LOW("EXIT\n")
+	IPA_MHI_DBG("EXIT\n")
 
 #define IPA_MHI_MAX_UL_CHANNELS 1
 #define IPA_MHI_MAX_DL_CHANNELS 1
@@ -198,6 +198,7 @@
 	union __packed gsi_channel_scratch ch_scratch;
 	struct ipa3_ep_context *ep;
 	const struct ipa_gsi_ep_config *ep_cfg;
+	bool burst_mode_enabled = false;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -280,8 +281,21 @@
 	ch_props.ring_len = params->ch_ctx_host->rlen;
 	ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
 			params->ch_ctx_host->rbase);
-	ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+
+	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+		burst_mode_enabled = true;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+		!burst_mode_enabled)
+		ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+
 	ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	ch_props.prefetch_mode =
+		ipa_get_ep_prefetch_mode(client);
 	ch_props.low_weight = 1;
 	ch_props.err_cb = params->ch_err_cb;
 	ch_props.chan_user_data = params->channel;
@@ -307,9 +321,9 @@
 		ch_scratch.mhi.outstanding_threshold = 0;
 	}
 	ch_scratch.mhi.oob_mod_threshold = 4;
-	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
-		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
-		ch_scratch.mhi.burst_mode_enabled = true;
+
+	if (burst_mode_enabled) {
+		ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled;
 		ch_scratch.mhi.polling_configuration =
 			ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
 				(ch_props.ring_len / ch_props.re_size));
@@ -549,6 +563,7 @@
 	int res;
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
+	union __packed gsi_channel_scratch gsi_ch_scratch;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -563,11 +578,34 @@
 		/*
 		 * set polling mode bit to DB mode before
 		 * resuming the channel
+		 *
+		 * For MHI-->IPA pipes:
+		 * when resuming due to transition to M0,
+		 * set the polling mode bit to 0.
+		 * In other cases, restore the value it had
+		 * when the channel was stopped.
+		 * Here, after a successful resume the client moves to M0,
+		 * so the polling mode bit is cleared by default.
+		 *
+		 * For IPA-->MHI pipe:
+		 * always restore the polling mode bit.
 		 */
-		res = gsi_write_channel_scratch(
-			ep->gsi_chan_hdl, ch_scratch);
+
+		res = gsi_read_channel_scratch(
+			ep->gsi_chan_hdl, &gsi_ch_scratch);
 		if (res) {
-			IPA_MHI_ERR("write ch scratch fail %d\n"
+			IPA_MHI_ERR("Read ch scratch fail %d\n"
+				, res);
+			return res;
+		}
+
+		if (IPA_CLIENT_IS_PROD(client))
+			gsi_ch_scratch.mhi.polling_mode = false;
+
+		res = gsi_write_channel_scratch(
+			ep->gsi_chan_hdl, gsi_ch_scratch);
+		if (res) {
+			IPA_MHI_ERR("Write ch scratch fail %d\n"
 				, res);
 			return res;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index b48f2c4..7065e2c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1258,6 +1258,11 @@
 		goto bail;
 	}
 
+	if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
+		IPAERR_RL("NAT hasn't been initialized\n");
+		return -EPERM;
+	}
+
 	for (cnt = 0; cnt < dma->entries; ++cnt) {
 		result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]);
 		if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index 0772dde..47a03f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -16,7 +16,7 @@
 #include <linux/msm_ipa.h>
 
 /* internal to ipa */
-#define IPA_PM_MAX_CLIENTS 12 /* actual max is value -1 since we start from 1*/
+#define IPA_PM_MAX_CLIENTS 32 /* actual max is value -1 since we start from 1*/
 #define IPA_PM_MAX_EX_CL 64
 #define IPA_PM_THRESHOLD_MAX 5
 #define IPA_PM_EXCEPTION_MAX 2
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 3351a33..3210a70 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -399,7 +399,7 @@
 
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
-static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
+static inline int ipa3_wwan_set_modem_perf_profile(int throughput);
 static inline int ipa3_qmi_enable_per_client_stats(
 	struct ipa_enable_per_client_stats_req_msg_v01 *req,
 	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 7861896..736c0fb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1577,6 +1577,15 @@
 			}
 		}
 	}
+
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v4) ||
+		ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v6)) {
+		IPAERR("fail to commit rt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EPERM;
+	}
 	mutex_unlock(&ipa3_ctx->lock);
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 13d2511..3cbc0c6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -756,16 +756,21 @@
 		return -EINVAL;
 	}
 
-	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
-		if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
-			in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
-			IPAERR("alignment failure on TX\n");
-			return -EINVAL;
-		}
-	} else {
-		if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
-			IPAERR("alignment failure on RX\n");
-			return -EINVAL;
+	if (!in->smmu_enabled) {
+		if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+			if (in->u.dl.comp_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT ||
+				in->u.dl.ce_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT) {
+				IPAERR("alignment failure on TX\n");
+				return -EINVAL;
+			}
+		} else {
+			if (in->u.ul.rdy_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT) {
+				IPAERR("alignment failure on RX\n");
+				return -EINVAL;
+			}
 		}
 	}
 
@@ -795,43 +800,73 @@
 			cmd.size = sizeof(*tx_2);
 		else
 			cmd.size = sizeof(*tx);
-		IPADBG("comp_ring_base_pa=0x%pa\n",
-				&in->u.dl.comp_ring_base_pa);
-		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
-		IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
-		IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
-		IPADBG("ce_ring_doorbell_pa=0x%pa\n",
-				&in->u.dl.ce_door_bell_pa);
-		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+		if (in->smmu_enabled) {
+			IPADBG("comp_ring_size=%d\n",
+				in->u.dl_smmu.comp_ring_size);
+			IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl_smmu.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n",
+				in->u.dl_smmu.num_tx_buffers);
+		} else {
+			IPADBG("comp_ring_base_pa=0x%pa\n",
+					&in->u.dl.comp_ring_base_pa);
+			IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+			IPADBG("ce_ring_base_pa=0x%pa\n",
+				&in->u.dl.ce_ring_base_pa);
+			IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+		}
 	} else {
 		if (ipa3_ctx->ipa_wdi2)
 			cmd.size = sizeof(*rx_2);
 		else
 			cmd.size = sizeof(*rx);
-		IPADBG("rx_ring_base_pa=0x%pa\n",
-			&in->u.ul.rdy_ring_base_pa);
-		IPADBG("rx_ring_size=%d\n",
-			in->u.ul.rdy_ring_size);
-		IPADBG("rx_ring_rp_pa=0x%pa\n",
-			&in->u.ul.rdy_ring_rp_pa);
-		IPADBG("rx_comp_ring_base_pa=0x%pa\n",
-			&in->u.ul.rdy_comp_ring_base_pa);
-		IPADBG("rx_comp_ring_size=%d\n",
-			in->u.ul.rdy_comp_ring_size);
-		IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
-			&in->u.ul.rdy_comp_ring_wp_pa);
-		ipa3_ctx->uc_ctx.rdy_ring_base_pa =
-			in->u.ul.rdy_ring_base_pa;
-		ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
-			in->u.ul.rdy_ring_rp_pa;
-		ipa3_ctx->uc_ctx.rdy_ring_size =
-			in->u.ul.rdy_ring_size;
-		ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
-			in->u.ul.rdy_comp_ring_base_pa;
-		ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
-			in->u.ul.rdy_comp_ring_wp_pa;
-		ipa3_ctx->uc_ctx.rdy_comp_ring_size =
-			in->u.ul.rdy_comp_ring_size;
+		if (in->smmu_enabled) {
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul_smmu.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul_smmu.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_comp_ring_wp_pa);
+			ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+				in->u.ul_smmu.rdy_ring_rp_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_size =
+				in->u.ul_smmu.rdy_ring_size;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul_smmu.rdy_comp_ring_wp_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+				in->u.ul_smmu.rdy_comp_ring_size;
+		} else {
+			IPADBG("rx_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_base_pa);
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_base_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_wp_pa);
+			ipa3_ctx->uc_ctx.rdy_ring_base_pa =
+				in->u.ul.rdy_ring_base_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+				in->u.ul.rdy_ring_rp_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_size =
+				in->u.ul.rdy_ring_size;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
+				in->u.ul.rdy_comp_ring_base_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul.rdy_comp_ring_wp_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+				in->u.ul.rdy_comp_ring_size;
+		}
 	}
 
 	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
@@ -945,10 +980,11 @@
 			tx->comp_ring_size = len;
 			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
 				in->u.dl.ce_ring_size;
-			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d 0x%lx\n",
 					in->smmu_enabled,
 					in->u.dl_smmu.ce_ring_size,
-					in->u.dl.ce_ring_size);
+					in->u.dl.ce_ring_size,
+					va);
 			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
 						in->smmu_enabled,
 						in->u.dl.ce_ring_base_pa,
@@ -975,8 +1011,19 @@
 				result = -ENOMEM;
 				goto uc_timeout;
 			}
-			tx->ce_ring_doorbell_pa = va;
-			tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+
+			IPADBG("CE doorbell pa: 0x%pa va:0x%lx\n", &pa, va);
+			IPADBG("Is wdi_over_pcie ? (%s)\n",
+				ipa3_ctx->wdi_over_pcie ? "Yes":"No");
+
+			if (ipa3_ctx->wdi_over_pcie)
+				tx->ce_ring_doorbell_pa = pa;
+			else
+				tx->ce_ring_doorbell_pa = va;
+
+			tx->num_tx_buffers = in->smmu_enabled ?
+				in->u.dl_smmu.num_tx_buffers :
+				in->u.dl.num_tx_buffers;
 			tx->ipa_pipe_number = ipa_ep_idx;
 		}
 		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
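
The ipa_uc_wdi.c hunks above only enforce the % IPA_WDI_RING_ALIGNMENT checks when SMMU is disabled, presumably because with SMMU enabled the rings are described through the u.dl_smmu/u.ul_smmu members rather than raw physical addresses. A small standalone sketch of the alignment test itself; the alignment value below is illustrative, not the driver's constant:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ALIGNMENT 8	/* illustrative only; the driver defines IPA_WDI_RING_ALIGNMENT */

/* True when a ring base address satisfies the alignment requirement. */
static bool ring_base_aligned(uint64_t base_pa)
{
	return (base_pa % RING_ALIGNMENT) == 0;
}

int main(void)
{
	uint64_t ok = 0x1000, bad = 0x1004;

	printf("0x%llx aligned: %d\n", (unsigned long long)ok, ring_base_aligned(ok));
	printf("0x%llx aligned: %d\n", (unsigned long long)bad, ring_base_aligned(bad));
	return 0;
}
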
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5d6d3cd..b7a561e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -24,6 +24,13 @@
 #include "ipahal/ipahal_hw_stats.h"
 #include "../ipa_rm_i.h"
 
+/*
+ * The following is for adding code (i.e. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_emulation_stubs.h"
+#endif
+
 #define IPA_V3_0_CLK_RATE_SVS2 (37.5 * 1000 * 1000UL)
 #define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
 #define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
@@ -964,12 +971,12 @@
 	/* IPA_3_5_1 */
 	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
 			true, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 1, 8, 16, IPA_EE_UC } },
 	[IPA_3_5_1][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = {
@@ -979,7 +986,7 @@
 			{ 8, 7, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = {
 			true, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD]		= {
@@ -1119,86 +1126,86 @@
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 6, 2, 8, 16, IPA_EE_UC } },
+			{ 6, 2, 8, 16, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 0, 8, 8, 16, IPA_EE_AP } },
+			{ 0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_LAN_PROD]   = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{ 8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 2, 3, 16, 32, IPA_EE_AP } },
+			{ 2, 3, 16, 32, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 4, 20, 24, IPA_EE_AP } },
+			{ 5, 4, 20, 24, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_ODU_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
-			{ 9, 0, 8, 16, IPA_EE_UC } },
+			{ 9, 0, 8, 16, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+			{ 3, 0, 16, 32, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+			{ 4, 1, 20, 24, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	/* Only for test purpose */
 	[IPA_4_0][IPA_CLIENT_TEST_PROD]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST1_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST2_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST3_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 7, 9, 8, 16, IPA_EE_AP } },
+			{ 7, 9, 8, 16, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{8, 10, 8, 16, IPA_EE_AP } },
+			{8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 
 
 	[IPA_4_0][IPA_CLIENT_WLAN1_CONS]          = {
@@ -1206,73 +1213,73 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 18, 3, 6, 9, IPA_EE_UC } },
+			{ 18, 3, 6, 9, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_WLAN2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			{ 20, 13, 9, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_WLAN3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_USB_CONS]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
+			{ 19, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_USB_DPL_CONS]        = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 15, 7, 5, 5, IPA_EE_AP } },
+			{ 15, 7, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_LAN_CONS]       = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 10, 5, 9, 9, IPA_EE_AP } },
+			{ 10, 5, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_APPS_WAN_CONS]       = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_ODU_EMB_CONS]        = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 17, 1, 17, 17, IPA_EE_AP } },
+			{ 17, 1, 17, 17, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_CONS]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 22, 1, 17, 17, IPA_EE_UC } },
+			{ 22, 1, 17, 17, IPA_EE_UC, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_LAN_CONS]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+			{ 14, 4, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_CONS]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+			{ 13, 3, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+			{ 16, 5, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_0][IPA_CLIENT_TEST_CONS]           = {
@@ -1280,38 +1287,38 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST1_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			{ 12, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
+			{ 19, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0][IPA_CLIENT_TEST4_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_0][IPA_CLIENT_DUMMY_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 31, 31, 8, 8, IPA_EE_AP } },
+			{ 31, 31, 8, 8, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 
 	/* IPA_4_0_MHI */
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
@@ -1319,74 +1326,74 @@
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 2, 3, 16, 32, IPA_EE_AP } },
+			{ 2, 3, 16, 32, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 5, 4, 20, 24, IPA_EE_AP } },
+			{ 5, 4, 20, 24, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_MHI_PROD]            = {
 			true, IPA_v4_0_MHI_GROUP_PCIE,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+			{ 3, 0, 16, 32, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			true, IPA_v4_0_MHI_GROUP_PCIE,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+			{ 4, 1, 20, 24, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 7, 9, 8, 16, IPA_EE_AP } },
+			{ 7, 9, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{ 8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_PROD]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST1_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{0, 8, 8, 16, IPA_EE_AP } },
+			{0, 8, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{7, 9, 8, 16, IPA_EE_AP } },
+			{7, 9, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{ 8, 10, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 
 
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
@@ -1394,87 +1401,87 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 10, 5, 9, 9, IPA_EE_AP } },
+			{ 10, 5, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_MHI_CONS]            = {
 			true, IPA_v4_0_MHI_GROUP_PCIE,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 17, 1, 17, 17, IPA_EE_AP } },
+			{ 17, 1, 17, 17, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+			{ 14, 4, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+			{ 13, 3, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			{ 20, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
 			true, IPA_v4_0_MHI_GROUP_DMA,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+			{ 16, 5, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 11, 6, 9, 9, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			{ 12, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
+			{ 19, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 21, 14, 9, 9, IPA_EE_AP } },
+			{ 21, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 31, 31, 8, 8, IPA_EE_AP } },
+			{ 31, 31, 8, 8, IPA_EE_AP, GSI_USE_PREFETCH_BUFS } },
 };
 
 /**
@@ -1965,6 +1972,29 @@
 		IPADBG("ipa_qmb_select_by_address_cons_en = %d\n",
 				comp_cfg.ipa_qmb_select_by_address_cons_en);
 	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("Before comp config\n");
+		IPADBG("gsi_multi_inorder_rd_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_rd_dis);
+
+		IPADBG("gsi_multi_inorder_wr_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_wr_dis);
+
+		comp_cfg.gsi_multi_inorder_rd_dis = true;
+		comp_cfg.gsi_multi_inorder_wr_dis = true;
+
+		ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg);
+
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("After comp config\n");
+		IPADBG("gsi_multi_inorder_rd_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_rd_dis);
+
+		IPADBG("gsi_multi_inorder_wr_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_wr_dis);
+	}
 }
 
 /**
@@ -1984,10 +2014,13 @@
 	max_writes.qmb_1_max_writes = 2;
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
-		max_writes.qmb_1_max_writes = 4;
 		max_reads.qmb_1_max_reads = 12;
+		max_writes.qmb_1_max_writes = 4;
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		max_reads.qmb_0_max_reads = 12;
+
 	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
 	ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads);
 }
@@ -2123,6 +2156,28 @@
 	return ipa_ep_idx;
 }
 
+
+/**
+ * ipa_get_ep_prefetch_mode() - provide prefetch_mode endpoint
+ * @client: client type
+ *
+ * Return value: prefetch_mode
+ */
+enum gsi_prefetch_mode ipa_get_ep_prefetch_mode(enum ipa_client_type client)
+{
+	enum gsi_prefetch_mode prefetch_mode;
+
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR_RL("Bad client number: client =%d\n", client);
+		return -IPA_EP_NOT_ALLOCATED;
+	}
+
+	prefetch_mode = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].
+		ipa_gsi_ep_info.prefetch_mode;
+
+	return prefetch_mode;
+}
+
 /**
  * ipa3_get_gsi_ep_info() - provide gsi ep information
  * @client: IPA client value
@@ -2322,6 +2377,32 @@
 }
 
 /**
+ * ipa3_get_client_by_pipe() - return client type relative to pipe
+ * index
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client type
+ */
+static enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
+{
+	int j = 0;
+
+	for (j = 0; j < IPA_CLIENT_MAX; j++) {
+		const struct ipa_ep_configuration *iec_ptr =
+			&(ipa3_ep_mapping[ipa3_get_hw_type_index()][j]);
+		if (iec_ptr->valid &&
+		    iec_ptr->ipa_gsi_ep_info.ipa_ep_num == pipe_idx)
+			break;
+	}
+
+	if (j == IPA_CLIENT_MAX)
+		IPADBG("ipa3_get_client_by_pipe(%d) can't find client\n",
+		       pipe_idx);
+
+	return j;
+}
+
+/**
  * ipa_init_ep_flt_bitmap() - Initialize the bitmap
  * that represents the End-points that supports filtering
  */
@@ -4870,7 +4951,8 @@
 	}
 
 	/* move resource group configuration from HLOS to TZ */
-	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+	    ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
 		IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
 		return;
 	}
@@ -5179,6 +5261,77 @@
 }
 
 /**
+ * emulator_load_single_fw() - load firmware into emulator's memory
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @phdr: ELF program header
+ * @fw_base: memory location to which firmware should get loaded
+ * @offset_from_base: offset to start relative to fw_base
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+static int emulator_load_single_fw(
+	const struct firmware   *firmware,
+	const struct elf32_phdr *phdr,
+	void __iomem            *fw_base,
+	uint32_t                offset_from_base)
+{
+	int index;
+	uint32_t ofb;
+	const uint32_t *elf_data_ptr;
+
+	IPADBG("firmware(%pK) phdr(%pK) fw_base(%pK) offset_from_base(0x%x)\n",
+	       firmware, phdr, fw_base, offset_from_base);
+
+	if (phdr->p_offset > firmware->size) {
+		IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
+			phdr->p_offset, firmware->size);
+		return -EINVAL;
+	}
+	if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
+		IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
+			phdr->p_offset, phdr->p_filesz, firmware->size);
+		return -EINVAL;
+	}
+
+	if (phdr->p_memsz % sizeof(uint32_t)) {
+		IPAERR("FW mem size %u doesn't align to 32bit\n",
+			phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		IPAERR("FW image too big src_size=%u dst_size=%u\n",
+			phdr->p_filesz, phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	IPADBG("ELF: p_memsz(0x%x) p_filesz(0x%x) p_filesz/4(0x%x)\n",
+	       (uint32_t) phdr->p_memsz,
+	       (uint32_t) phdr->p_filesz,
+	       (uint32_t) (phdr->p_filesz/sizeof(uint32_t)));
+
+	/* Set the entire region to 0s */
+	ofb = offset_from_base;
+	for (index = 0; index < phdr->p_memsz/sizeof(uint32_t); index++) {
+		writel_relaxed(0, fw_base + ofb);
+		ofb += sizeof(uint32_t);
+	}
+
+	elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
+
+	/* Write the FW */
+	ofb = offset_from_base;
+	for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
+		writel_relaxed(*elf_data_ptr, fw_base + ofb);
+		elf_data_ptr++;
+		ofb += sizeof(uint32_t);
+	}
+
+	return 0;
+}
+
+/**
  * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
  *
  * @firmware: Structure which contains the FW data from the user space.
@@ -5286,6 +5439,240 @@
 	return 0;
 }
 
+/*
+ * The following needed for the EMULATION system. On a non-emulation
+ * system (ie. the real UE), this is functionality is done in the
+ * TZ...
+ */
+#define IPA_SPARE_REG_1_VAL (0xC0000805)
+
+static void ipa_gsi_setup_reg(void)
+{
+	u32 reg_val, start;
+	int i;
+	const struct ipa_gsi_ep_config *gsi_ep_info_cfg;
+	enum ipa_client_type type;
+
+	IPADBG("Setting up registers in preparation for firmware download\n");
+
+	/* enable GSI interface */
+	ipahal_write_reg(IPA_GSI_CONF, 1);
+
+	/*
+	 * Before configuring the FIFOs need to unset bit 30 in the
+	 * spare register
+	 */
+	ipahal_write_reg(IPA_SPARE_REG_1,
+			 (IPA_SPARE_REG_1_VAL & (~(1 << 30))));
+
+	/* setup IPA_ENDP_GSI_CFG_TLV_n reg */
+	start = 0;
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	IPADBG("ipa_num_pipes=%u\n", ipa3_ctx->ipa_num_pipes);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		IPADBG("for ep %d client is %d\n", i, type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		IPADBG("Config is true\n");
+		reg_val = (gsi_ep_info_cfg->ipa_if_tlv << 16) + start;
+		start += gsi_ep_info_cfg->ipa_if_tlv;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_TLV_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG_AOS_n reg */
+	start = 0;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (gsi_ep_info_cfg->ipa_if_aos << 16) + start;
+		start += gsi_ep_info_cfg->ipa_if_aos;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_AOS_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG1_n reg */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (1 << 16) +
+			((u32)gsi_ep_info_cfg->ipa_gsi_chan_num << 8) +
+			gsi_ep_info_cfg->ee;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, reg_val);
+	}
+
+	/*
+	 * Setup IPA_ENDP_GSI_CFG2_n reg: this register must be setup
+	 * as last one
+	 */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = 1 << 31;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_n, i, reg_val);
+		reg_val = 0;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_n, i, reg_val);
+	}
+
+	/*
+	 * After configuring the FIFOs need to set bit 30 in the spare
+	 * register
+	 */
+	ipahal_write_reg(IPA_SPARE_REG_1,
+			 (IPA_SPARE_REG_1_VAL | (1 << 30)));
+}
+
+/**
+ * emulator_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ *
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	void __iomem *gsi_base;
+	uint32_t hps_seq_offs, dps_seq_offs;
+	unsigned long gsi_offset;
+	int rc;
+
+	IPADBG("Loading firmware(%pK)\n", firmware);
+
+	if (!firmware) {
+		IPAERR("firmware pointer passed to function is NULL\n");
+		return -EINVAL;
+	}
+
+	/* One program header per FW image: GSI, DPS and HPS */
+	if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
+		IPAERR(
+		    "Missing ELF and Program headers firmware size=%zu\n",
+		    firmware->size);
+		return -EINVAL;
+	}
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+
+	ipa_assert_on(!ehdr);
+
+	if (ehdr->e_phnum != 3) {
+		IPAERR("Unexpected number of ELF program headers\n");
+		return -EINVAL;
+	}
+
+	hps_seq_offs = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
+	dps_seq_offs = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
+
+	/*
+	 * Each ELF program header represents a FW image and contains:
+	 *  p_vaddr : The starting address to which the FW needs to loaded.
+	 *  p_memsz : The size of the IRAM (where the image loaded)
+	 *  p_filesz: The size of the FW image embedded inside the ELF
+	 *  p_offset: Absolute offset to the image from the head of the ELF
+	 *
+	 * NOTE WELL: On the emulation platform, the p_vaddr address
+	 *            is not relevant and is unused.  This is because
+	 *            on the emulation platform, the registers'
+	 *            address location is mutable, since it's mapped
+	 *            in via a PCIe probe.  Given this, it is the
+	 *            mapped address info that's used while p_vaddr is
+	 *            ignored.
+	 */
+	phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
+
+	phdr += 2;
+
+	/*
+	 * Attempt to load IPA HPS FW image
+	 */
+	if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) {
+		IPAERR("Invalid IPA HPS img size memsz=%d hps_mem_size=%u\n",
+		       phdr->p_memsz, ipahal_get_hps_img_mem_size());
+		return -EINVAL;
+	}
+	IPADBG("Loading HPS FW\n");
+	rc = emulator_load_single_fw(
+	    firmware, phdr, ipa3_ctx->mmio, hps_seq_offs);
+	if (rc)
+		return rc;
+	IPADBG("Loading HPS FW complete\n");
+
+	--phdr;
+
+	/*
+	 * Attempt to load IPA DPS FW image
+	 */
+	if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) {
+		IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n",
+		       phdr->p_memsz, ipahal_get_dps_img_mem_size());
+		return -EINVAL;
+	}
+	IPADBG("Loading DPS FW\n");
+	rc = emulator_load_single_fw(
+	    firmware, phdr, ipa3_ctx->mmio, dps_seq_offs);
+	if (rc)
+		return rc;
+	IPADBG("Loading DPS FW complete\n");
+
+	/*
+	 * Run gsi register setup which is normally done in TZ on
+	 * non-EMULATION systems...
+	 */
+	ipa_gsi_setup_reg();
+
+	/*
+	 * Map to the GSI base...
+	 */
+	gsi_base = ioremap_nocache(transport_mem_base, transport_mem_size);
+
+	IPADBG("GSI base(0x%x) mapped to (%pK) with len (0x%x)\n",
+	       transport_mem_base,
+	       gsi_base,
+	       transport_mem_size);
+
+	if (!gsi_base) {
+		IPAERR("ioremap_nocache failed\n");
+		return -EFAULT;
+	}
+
+	--phdr;
+
+	/*
+	 * Attempt to load GSI FW image
+	 */
+	if (phdr->p_memsz > transport_mem_size) {
+		IPAERR(
+		    "Invalid GSI FW img size memsz=%d transport_mem_size=%u\n",
+		    phdr->p_memsz, transport_mem_size);
+		return -EINVAL;
+	}
+	IPADBG("Loading GSI FW\n");
+	gsi_get_inst_ram_offset_and_size(&gsi_offset, NULL);
+	rc = emulator_load_single_fw(
+	    firmware, phdr, gsi_base, (uint32_t) gsi_offset);
+	iounmap(gsi_base);
+	if (rc)
+		return rc;
+	IPADBG("Loading GSI FW complete\n");
+
+	IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
+
+	return 0;
+}
+
 /**
  * ipa3_is_msm_device() - Is the running device a MSM or MDM?
  *  Determine according to IPA version
@@ -5379,4 +5766,3 @@
 	desc->len = cmd_pyld->len;
 	desc->type = IPA_IMM_CMD_DESC;
 }
-
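
emulator_load_single_fw() above validates each ELF32 program header before copying the image word by word into the mapped memory. A self-contained userspace sketch of the same four bounds checks, assuming plain structs in place of the kernel's struct firmware and struct elf32_phdr:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for struct firmware and struct elf32_phdr. */
struct fw_blob { const uint8_t *data; size_t size; };
struct phdr32 { uint32_t p_offset, p_filesz, p_memsz; };

/* Mirrors the sanity checks performed before the word-by-word copy. */
static int validate_phdr(const struct fw_blob *fw, const struct phdr32 *ph)
{
	if (ph->p_offset > fw->size)
		return -1;		/* segment starts beyond the end of the file */
	if (fw->size - ph->p_offset < ph->p_filesz)
		return -1;		/* segment data runs past the end of the file */
	if (ph->p_memsz % sizeof(uint32_t))
		return -1;		/* destination size is not 32-bit aligned */
	if (ph->p_filesz > ph->p_memsz)
		return -1;		/* image is larger than the target memory */
	return 0;
}

int main(void)
{
	uint8_t img[64] = { 0 };
	struct fw_blob fw = { img, sizeof(img) };
	struct phdr32 good = { 16, 32, 48 };
	struct phdr32 bad  = { 60, 32, 48 };	/* p_filesz overruns the blob */

	printf("good: %d\n", validate_phdr(&fw, &good));
	printf("bad:  %d\n", validate_phdr(&fw, &bad));
	return 0;
}
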
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 1254fe3..530adef 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/debugfs.h>
 #include <linux/ipa.h>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 26b7f0f..d46f13c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -55,6 +55,14 @@
 			" %s:%d " fmt, ## args); \
 	} while (0)
 
+#define IPAHAL_DBG_REG_IPC_ONLY(fmt, args...) \
+	do { \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			" %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			" %s:%d " fmt, ## args); \
+	} while (0)
+
 #define IPAHAL_ERR_RL(fmt, args...) \
 		do { \
 			pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index ce59488..2dbea95 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -133,6 +133,11 @@
 	__stringify(IPA_FEC_ATTR_EE_n),
 	__stringify(IPA_MBIM_DEAGGR_FEC_ATTR_EE_n),
 	__stringify(IPA_GEN_DEAGGR_FEC_ATTR_EE_n),
+	__stringify(IPA_GSI_CONF),
+	__stringify(IPA_ENDP_GSI_CFG1_n),
+	__stringify(IPA_ENDP_GSI_CFG2_n),
+	__stringify(IPA_ENDP_GSI_CFG_AOS_n),
+	__stringify(IPA_ENDP_GSI_CFG_TLV_n),
 };
 
 static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -1971,6 +1976,21 @@
 	[IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
 		ipareg_construct_hps_queue_weights,
 		ipareg_parse_hps_queue_weights, 0x000005a4, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_GSI_CONF] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002790, 0x0, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002794, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG2_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002A2C, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000029A8, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG_TLV_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002924, 0x4, 0, 0, 0 },
 
 	/* IPAv4.0 */
 	[IPA_HW_v4_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
@@ -2217,7 +2237,7 @@
 		0x00000CC0, 0x70, 10, 23, 1},
 };
 
-int ipahal_print_all_regs(void)
+int ipahal_print_all_regs(bool print_to_dmesg)
 {
 	int i, j;
 
@@ -2236,9 +2256,16 @@
 
 		j = ipahal_reg_objs[ipahal_ctx->hw_type][i].n_start;
 
-		if (j == ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end)
-			IPAHAL_DBG_REG("%s=0x%x\n", ipahal_reg_name_str(i),
-				ipahal_read_reg_n(i, j));
+		if (j == ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end) {
+			if (print_to_dmesg)
+				IPAHAL_DBG_REG("%s=0x%x\n",
+					ipahal_reg_name_str(i),
+					ipahal_read_reg_n(i, j));
+			else
+				IPAHAL_DBG_REG_IPC_ONLY("%s=0x%x\n",
+					ipahal_reg_name_str(i),
+					ipahal_read_reg_n(i, j));
+		}
 
 		for (; j < ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end; j++)
 			IPAHAL_DBG_REG("%s_%u=0x%x\n", ipahal_reg_name_str(i),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 7e8e8ba..fdf4fd1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -134,6 +134,11 @@
 	IPA_FEC_ATTR_EE_n,
 	IPA_MBIM_DEAGGR_FEC_ATTR_EE_n,
 	IPA_GEN_DEAGGR_FEC_ATTR_EE_n,
+	IPA_GSI_CONF,
+	IPA_ENDP_GSI_CFG1_n,
+	IPA_ENDP_GSI_CFG2_n,
+	IPA_ENDP_GSI_CFG_AOS_n,
+	IPA_ENDP_GSI_CFG_TLV_n,
 	IPA_REG_MAX,
 };
 
@@ -519,7 +524,7 @@
 };
 
 
-int ipahal_print_all_regs(void);
+int ipahal_print_all_regs(bool print_to_dmesg);
 
 /*
  * ipahal_reg_name_str() - returns string that represent the register
@@ -641,4 +646,3 @@
 	struct ipahal_reg_valmask *valmask);
 
 #endif /* _IPAHAL_REG_H_ */
-
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index 82bee5d..e18e534 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,8 @@
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
 obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
 ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 212557c..7f65956 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -327,6 +327,8 @@
 	u32 prod_hdl;
 	u32 cons_hdl;
 	u32 test_prod_hdl;
+	phys_addr_t transport_phys_addr;
+	unsigned long transport_size;
 };
 
 static struct ipa_test_mhi_context *test_mhi_ctx;
@@ -780,11 +782,6 @@
 
 	IPA_UT_DBG("Start Setup\n");
 
-	if (!gsi_ctx) {
-		IPA_UT_ERR("No GSI ctx\n");
-		return -EINVAL;
-	}
-
 	if (!ipa3_ctx) {
 		IPA_UT_ERR("No IPA ctx\n");
 		return -EINVAL;
@@ -797,11 +794,20 @@
 		return -ENOMEM;
 	}
 
-	test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr,
-		gsi_ctx->per.size);
-	if (!test_mhi_ctx) {
+	rc = ipa3_get_transport_info(&test_mhi_ctx->transport_phys_addr,
+				     &test_mhi_ctx->transport_size);
+	if (rc != 0) {
+		IPA_UT_ERR("ipa3_get_transport_info() failed\n");
+		rc = -EFAULT;
+		goto fail_free_ctx;
+	}
+
+	test_mhi_ctx->gsi_mmio =
+	    ioremap_nocache(test_mhi_ctx->transport_phys_addr,
+			    test_mhi_ctx->transport_size);
+	if (!test_mhi_ctx->gsi_mmio) {
 		IPA_UT_ERR("failed to remap GSI HW size=%lu\n",
-			gsi_ctx->per.size);
+			   test_mhi_ctx->transport_size);
 		rc = -EFAULT;
 		goto fail_free_ctx;
 	}
@@ -1385,7 +1391,7 @@
 		/* write value to event ring doorbell */
 		IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
 			p_events[event_ring_index].wp,
-			&(gsi_ctx->per.phys_addr),
+			&(test_mhi_ctx->transport_phys_addr),
 			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
 			event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
 		iowrite32(p_events[event_ring_index].wp,
@@ -1432,7 +1438,7 @@
 				IPA_UT_LOG(
 					"DB to channel 0x%llx: base %pa ofst 0x%x\n"
 					, p_channels[channel_idx].wp
-					, &(gsi_ctx->per.phys_addr)
+					, &(test_mhi_ctx->transport_phys_addr)
 					, GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
 						channel_idx, 0));
 				iowrite32(p_channels[channel_idx].wp,
@@ -3324,4 +3330,3 @@
 		ipa_mhi_test_in_loop_channel_reset_ipa_holb,
 		true, IPA_HW_v3_0, IPA_HW_MAX),
 } IPA_UT_DEFINE_SUITE_END(mhi);
-
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index bcbcd87..dad3ec9 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -1036,9 +1036,10 @@
  * If IPA driver already ready, continue initialization immediately.
  * if not, wait for IPA ready notification by IPA driver context
  */
-static int __init ipa_ut_module_init(void)
+int __init ipa_ut_module_init(void)
 {
-	int ret;
+	int ret = 0;
+	bool init_framework = true;
 
 	IPA_UT_INFO("Loading IPA test module...\n");
 
@@ -1050,14 +1051,34 @@
 	mutex_init(&ipa_ut_ctx->lock);
 
 	if (!ipa_is_ready()) {
+		init_framework = false;
+
 		IPA_UT_DBG("IPA driver not ready, registering callback\n");
+
 		ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL);
 
 		/*
-		 * If we received -EEXIST, IPA has initialized. So we need
-		 * to continue the initing process.
+		 * If the call to ipa_register_ipa_ready_cb() above
+		 * returns 0, this means that we've succeeded in
+		 * queuing up a future call to ipa_ut_framework_init()
+		 * and that the call to it will be made once the IPA
+		 * becomes ready.  If this is the case, the call to
+		 * ipa_ut_framework_init() below need not be made.
+		 *
+		 * If the call to ipa_register_ipa_ready_cb() above
+		 * returns -EEXIST, it means that during the call to
+		 * ipa_register_ipa_ready_cb(), the IPA has become
+		 * ready, and hence, no indirect call to
+		 * ipa_ut_framework_init() will be made, so we need to
+		 * call it ourselves below.
+		 *
+		 * If the call to ipa_register_ipa_ready_cb() above
+		 * returns something other than 0 or -EEXIST, that's a
+		 * hard error.
 		 */
-		if (ret != -EEXIST) {
+		if (ret == -EEXIST) {
+			init_framework = true;
+		} else {
 			if (ret) {
 				IPA_UT_ERR("IPA CB reg failed - %d\n", ret);
 				kfree(ipa_ut_ctx);
@@ -1067,12 +1088,15 @@
 		}
 	}
 
-	ret = ipa_ut_framework_init();
-	if (ret) {
-		IPA_UT_ERR("framework init failed\n");
-		kfree(ipa_ut_ctx);
-		ipa_ut_ctx = NULL;
+	if (init_framework) {
+		ret = ipa_ut_framework_init();
+		if (ret) {
+			IPA_UT_ERR("framework init failed\n");
+			kfree(ipa_ut_ctx);
+			ipa_ut_ctx = NULL;
+		}
 	}
+
 	return ret;
 }
 
@@ -1081,7 +1105,7 @@
  *
  * Destroys the Framework and removes its context
  */
-static void ipa_ut_module_exit(void)
+void ipa_ut_module_exit(void)
 {
 	IPA_UT_DBG("Entry\n");
 
@@ -1093,8 +1117,9 @@
 	ipa_ut_ctx = NULL;
 }
 
+#if IPA_EMULATION_COMPILE == 0 /* On real UE, we have a module */
 module_init(ipa_ut_module_init);
 module_exit(ipa_ut_module_exit);
-
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPA Unit Test module");
+#endif
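
The expanded comment in ipa_ut_module_init() above distinguishes three outcomes of ipa_register_ipa_ready_cb(): 0 defers initialization to the ready callback, -EEXIST means IPA became ready during registration so initialization must run inline, and anything else is a hard error. A compact sketch of that decision, with stub functions standing in for the driver's APIs:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the IPA driver calls used by the test module. */
static bool ipa_ready(void) { return false; }
static int register_ready_cb(void) { return -EEXIST; }	/* simulate the race */
static int framework_init(void) { printf("framework init\n"); return 0; }

int main(void)
{
	bool init_now = true;
	int ret = 0;

	if (!ipa_ready()) {
		init_now = false;
		ret = register_ready_cb();
		if (ret == -EEXIST)
			init_now = true;	/* IPA became ready while registering */
		else if (ret)
			return 1;		/* hard error */
		/* ret == 0: init will run later from the ready callback */
	}

	if (init_now)
		ret = framework_init();
	return ret;
}
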
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 1c16e5a..645385e 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -824,13 +824,13 @@
 }
 EXPORT_SYMBOL(mhi_dev_send_ee_event);
 
-static void mhi_dev_trigger_cb(void)
+static void mhi_dev_trigger_cb(enum mhi_client_channel ch_id)
 {
 	struct mhi_dev_ready_cb_info *info;
 	enum mhi_ctrl_info state_data;
 
 	list_for_each_entry(info, &mhi_ctx->client_cb_list, list)
-		if (info->cb) {
+		if (info->cb && info->cb_data.channel == ch_id) {
 			mhi_ctrl_state_info(info->cb_data.channel, &state_data);
 			info->cb_data.ctrl_info = state_data;
 			info->cb(&info->cb_data);
@@ -1021,10 +1021,9 @@
 		if (rc)
 			pr_err("Error sending command completion event\n");
 
-		/* Trigger callback to clients */
-		mhi_dev_trigger_cb();
-
 		mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
+		/* Trigger callback to clients */
+		mhi_dev_trigger_cb(ch_id);
 		if (ch_id == MHI_CLIENT_MBIM_OUT)
 			kobject_uevent_env(&mhi_ctx->dev->kobj,
 						KOBJ_CHANGE, connected);
@@ -1418,16 +1417,20 @@
 	union mhi_dev_ring_element_type *el;
 	int rc = 0;
 	struct mhi_req *req = (struct mhi_req *)mreq;
-	struct mhi_req *local_req = NULL;
 	union mhi_dev_ring_element_type *compl_ev = NULL;
 	struct mhi_dev *mhi = NULL;
 	unsigned long flags;
+	size_t transfer_len;
+	u32 snd_cmpl;
+	uint32_t rd_offset;
 
 	client = req->client;
 	ch = client->channel;
 	mhi = ch->ring->mhi_dev;
 	el = req->el;
-	local_req = req;
+	transfer_len = req->len;
+	snd_cmpl = req->snd_cmpl;
+	rd_offset = req->rd_offset;
 	ch->curr_ereq->context = ch;
 
 	dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
@@ -1441,14 +1444,13 @@
 		compl_ev->evt_tr_comp.chid = ch->ch_id;
 		compl_ev->evt_tr_comp.type =
 				MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
-		compl_ev->evt_tr_comp.len = el->tre.len;
+		compl_ev->evt_tr_comp.len = transfer_len;
 		compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
 		compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
-				local_req->rd_offset * TR_RING_ELEMENT_SZ;
+						rd_offset * TR_RING_ELEMENT_SZ;
 		ch->curr_ereq->num_events++;
 
-		if (ch->curr_ereq->num_events >= MAX_TR_EVENTS ||
-				local_req->snd_cmpl){
+		if (ch->curr_ereq->num_events >= MAX_TR_EVENTS || snd_cmpl) {
 			mhi_log(MHI_MSG_VERBOSE,
 					"num of tr events %d for ch %d\n",
 					ch->curr_ereq->num_events, ch->ch_id);
@@ -1570,6 +1572,12 @@
 {
 	struct mhi_dev *mhi = dev_id;
 
+	if (!atomic_read(&mhi->mhi_dev_wake)) {
+		pm_stay_awake(mhi->dev);
+		atomic_set(&mhi->mhi_dev_wake, 1);
+		mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock in ISR\n");
+	}
+
 	disable_irq_nosync(mhi->mhi_irq);
 	schedule_work(&mhi->chdb_ctrl_work);
 	mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
@@ -1881,6 +1889,7 @@
 			(struct mhi_dev_client_cb_reason *cb))
 {
 	int rc = 0;
+	int i = 0;
 	struct mhi_dev_channel *ch;
 	struct platform_device *pdev;
 
@@ -1904,6 +1913,38 @@
 		goto exit;
 	}
 
+	/* Pre allocate event requests */
+	ch->ereqs = kcalloc(MHI_MAX_EVT_REQ, sizeof(*ch->ereqs), GFP_KERNEL);
+	if (!ch->ereqs) {
+		rc = -ENOMEM;
+		goto free_client;
+	}
+	/* pre allocate buffers to queue transfer completion events */
+	ch->tr_events = kcalloc(MHI_MAX_EVT_REQ,
+				MAX_TR_EVENTS * sizeof(*ch->tr_events),
+				GFP_KERNEL);
+	if (!ch->tr_events) {
+		rc = -ENOMEM;
+		goto free_ereqs;
+	}
+
+	/*
+	 * Organize the above allocated event request block and
+	 * completion event block into linked lists. Each event
+	 * request includes a pointer to a block of MAX_TR_EVENTS
+	 * completion events.
+	 */
+	INIT_LIST_HEAD(&mhi_ctx->ch[chan_id].event_req_buffers);
+	for (i = 0; i < MHI_MAX_EVT_REQ; ++i) {
+		ch->ereqs[i].tr_events = ch->tr_events + i * MAX_TR_EVENTS;
+		list_add_tail(&ch->ereqs[i].list,
+				&mhi_ctx->ch[chan_id].event_req_buffers);
+	}
+	mhi_ctx->ch[chan_id].curr_ereq =
+		container_of(mhi_ctx->ch[chan_id].event_req_buffers.next,
+				struct event_req, list);
+	list_del_init(&mhi_ctx->ch[chan_id].curr_ereq->list);
+
 	ch->active_client = (*handle_client);
 	(*handle_client)->channel = ch;
 	(*handle_client)->event_trigger = mhi_dev_client_cb_reason;
@@ -1916,6 +1957,13 @@
 	else if (ch->state == MHI_DEV_CH_STOPPED)
 		ch->state = MHI_DEV_CH_PENDING_START;
 
+	goto exit;
+
+free_ereqs:
+	kfree(ch->ereqs);
+	ch->ereqs = NULL;
+free_client:
+	kfree(*handle_client);
 exit:
 	mutex_unlock(&ch->ch_lock);
 	return rc;
@@ -1949,14 +1997,12 @@
 			mhi_log(MHI_MSG_ERROR,
 				"Trying to close an active channel (%d)\n",
 				ch->ch_id);
-			mutex_unlock(&ch->ch_lock);
 			rc = -EAGAIN;
 			goto exit;
 		} else if (ch->tre_loc) {
 			mhi_log(MHI_MSG_ERROR,
 				"Trying to close channel (%d) when a TRE is active",
 				ch->ch_id);
-			mutex_unlock(&ch->ch_lock);
 			rc = -EAGAIN;
 			goto exit;
 		}
@@ -1964,6 +2010,10 @@
 
 	ch->state = MHI_DEV_CH_CLOSED;
 	ch->active_client = NULL;
+	kfree(ch->ereqs);
+	kfree(ch->tr_events);
+	ch->ereqs = NULL;
+	ch->tr_events = NULL;
 	kfree(handle);
 exit:
 	mutex_unlock(&ch->ch_lock);
@@ -2401,8 +2451,10 @@
 		return;
 	}
 
-	if (mhi_ctx->config_iatu || mhi_ctx->mhi_int)
+	if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
+		mhi_ctx->mhi_int_en = true;
 		enable_irq(mhi_ctx->mhi_irq);
+	}
 
 	mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED);
 }
@@ -2452,8 +2504,10 @@
 	 * If channel is open during registration, no callback is issued.
 	 * Instead return -EEXIST to notify the client. Clients request
 	 * is added to the list to notify future state change notification.
+	 * Channel struct may not be allocated yet if this function is called
+	 * early during boot - add an explicit check for non-null "ch".
 	 */
-	if (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED) {
+	if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
 		mutex_unlock(&mhi_ctx->mhi_lock);
 		return -EEXIST;
 	}
@@ -2666,40 +2720,8 @@
 	if (!mhi->ch)
 		return -ENOMEM;
 
-
-	for (i = 0; i < mhi->cfg.channels; i++) {
+	for (i = 0; i < mhi->cfg.channels; i++)
 		mutex_init(&mhi->ch[i].ch_lock);
-		if (i == MHI_CLIENT_IP_SW_4_OUT || i == MHI_CLIENT_IP_SW_4_IN) {
-			int nreq = 0;
-
-			INIT_LIST_HEAD(&mhi->ch[i].event_req_buffers);
-			while (nreq < MHI_MAX_EVT_REQ) {
-				struct event_req *ereq;
-				/* Pre allocate event requests */
-				ereq = kzalloc(sizeof(struct event_req),
-						GFP_KERNEL);
-				if (!ereq)
-					return -ENOMEM;
-
-				/* pre allocate buffers to queue
-				 * transfer completion events
-				 */
-				ereq->tr_events = kzalloc(RING_ELEMENT_TYPE_SZ*
-						MAX_TR_EVENTS, GFP_KERNEL);
-				if (!ereq->tr_events) {
-					kfree(ereq);
-					return -ENOMEM;
-				}
-				list_add_tail(&ereq->list,
-						&mhi->ch[i].event_req_buffers);
-				nreq++;
-			}
-			mhi->ch[i].curr_ereq =
-				container_of(mhi->ch[i].event_req_buffers.next,
-						struct event_req, list);
-			list_del_init(&mhi->ch[i].curr_ereq->list);
-		}
-	}
 
 	spin_lock_init(&mhi->lock);
 	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
@@ -2832,8 +2854,6 @@
 
 	INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
 	INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
-	INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
-	mutex_init(&mhi_ctx->mhi_lock);
 	mutex_init(&mhi_ctx->mhi_event_lock);
 	mutex_init(&mhi_ctx->mhi_write_test);
 
@@ -2983,6 +3003,14 @@
 			dev_err(&pdev->dev,
 				"Failed to create IPC logging context\n");
 		}
+		/*
+		 * The below list and mutex should be initialized
+		 * before calling mhi_uci_init to avoid a crash in
+		 * mhi_register_state_cb when it accesses them.
+		 */
+		INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
+		mutex_init(&mhi_ctx->mhi_lock);
+
 		mhi_uci_init();
 		mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
 						MHI_STATE_CONFIGURED);
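
mhi_dev_open_channel() above now allocates the event requests and their completion-event buffers as two arrays, threads the requests onto a per-channel free list with each request owning one MAX_TR_EVENTS slice of the buffer, and pops the first request as curr_ereq. A standalone sketch of that carving pattern; the counts and struct layout below are illustrative rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

#define NUM_REQS    4	/* illustrative; the driver uses MHI_MAX_EVT_REQ */
#define EVS_PER_REQ 8	/* illustrative; the driver uses MAX_TR_EVENTS */

struct tr_event { unsigned int code; };

struct event_req {
	struct tr_event *tr_events;	/* this request's slice of the event pool */
	struct event_req *next;		/* stand-in for the kernel list_head */
};

int main(void)
{
	struct event_req *reqs = calloc(NUM_REQS, sizeof(*reqs));
	struct tr_event *pool = calloc((size_t)NUM_REQS * EVS_PER_REQ, sizeof(*pool));
	struct event_req *free_list = NULL, *curr;
	int i;

	if (!reqs || !pool)
		return 1;

	/* Hand each request its own block of EVS_PER_REQ completion events. */
	for (i = NUM_REQS - 1; i >= 0; i--) {
		reqs[i].tr_events = pool + i * EVS_PER_REQ;
		reqs[i].next = free_list;
		free_list = &reqs[i];
	}

	/* Pop the first request up front, as the driver does for curr_ereq. */
	curr = free_list;
	free_list = curr->next;
	printf("current request uses event slots [%d..%d)\n",
	       (int)(curr->tr_events - pool),
	       (int)(curr->tr_events - pool) + EVS_PER_REQ);

	free(pool);
	free(reqs);
	return 0;
}
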
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
index b9120fc..3559021 100644
--- a/drivers/platform/msm/mhi_dev/mhi.h
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -484,7 +484,13 @@
 	struct mutex			ch_lock;
 	/* client which the current inbound/outbound message is for */
 	struct mhi_dev_client		*active_client;
-
+	/*
+	 * Pointer to event request structs used to temporarily store
+	 * completion events and meta data before sending them to host
+	 */
+	struct event_req		*ereqs;
+	/* Pointer to completion event buffers */
+	union mhi_dev_ring_element_type *tr_events;
 	struct list_head		event_req_buffers;
 	struct event_req		*curr_ereq;
 
@@ -605,6 +611,8 @@
 
 	/*Register for interrupt */
 	bool				mhi_int;
+	bool				mhi_int_en;
+
 	/* Registered client callback list */
 	struct list_head		client_cb_list;
 
@@ -702,7 +710,9 @@
 	MHI_CLIENT_SMCT_IN = 45,
 	MHI_CLIENT_IP_SW_4_OUT  = 46,
 	MHI_CLIENT_IP_SW_4_IN  = 47,
-	MHI_MAX_SOFTWARE_CHANNELS = 48,
+	MHI_CLIENT_ADB_OUT = 48,
+	MHI_CLIENT_ADB_IN = 49,
+	MHI_MAX_SOFTWARE_CHANNELS,
 	MHI_CLIENT_TEST_OUT = 60,
 	MHI_CLIENT_TEST_IN = 61,
 	MHI_CLIENT_RESERVED_1_LOWER = 62,
@@ -712,6 +722,7 @@
 	MHI_CLIENT_RESERVED_2_LOWER = 102,
 	MHI_CLIENT_RESERVED_2_UPPER = 127,
 	MHI_MAX_CHANNELS = 102,
+	MHI_CLIENT_INVALID = 0xFFFFFFFF
 };
 
 /* Use ID 0 for legacy /dev/mhi_ctrl. Channel 0 is used for internal only */
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
index 1200a36..af05c9e 100644
--- a/drivers/platform/msm/mhi_dev/mhi_sm.c
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -19,6 +19,7 @@
 #include <linux/ipa_mhi.h>
 #include "mhi_hwio.h"
 #include "mhi_sm.h"
+#include <linux/interrupt.h>
 
 #define MHI_SM_DBG(fmt, args...) \
 	mhi_log(MHI_MSG_DBG, fmt, ##args)
@@ -775,6 +776,7 @@
 	struct mhi_sm_ep_pcie_event *chg_event = container_of(work,
 		struct mhi_sm_ep_pcie_event, work);
 	enum ep_pcie_event pcie_event = chg_event->event;
+	unsigned long flags;
 
 	MHI_SM_FUNC_ENTRY();
 
@@ -846,6 +848,15 @@
 
 		mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
 
+		spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags);
+		if ((mhi_sm_ctx->mhi_dev->mhi_int) &&
+				(!mhi_sm_ctx->mhi_dev->mhi_int_en)) {
+			enable_irq(mhi_sm_ctx->mhi_dev->mhi_irq);
+			mhi_sm_ctx->mhi_dev->mhi_int_en = true;
+			MHI_SM_DBG("Enable MHI IRQ during PCIe DEAST");
+		}
+		spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags);
+
 		res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
 			EP_PCIE_OPT_ENUM);
 		if (res) {
@@ -1141,6 +1152,7 @@
 {
 	struct mhi_sm_ep_pcie_event *dstate_change_evt;
 	enum ep_pcie_event event;
+	unsigned long flags;
 
 	MHI_SM_FUNC_ENTRY();
 
@@ -1173,6 +1185,16 @@
 		break;
 	case EP_PCIE_EVENT_PM_D3_HOT:
 		mhi_sm_ctx->stats.d3_hot_event_cnt++;
+
+		spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags);
+		if ((mhi_sm_ctx->mhi_dev->mhi_int) &&
+				(mhi_sm_ctx->mhi_dev->mhi_int_en)) {
+			disable_irq(mhi_sm_ctx->mhi_dev->mhi_irq);
+			mhi_sm_ctx->mhi_dev->mhi_int_en = false;
+			MHI_SM_DBG("Disable MHI IRQ during D3 HOT");
+		}
+		spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags);
+
 		mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev);
 		break;
 	case EP_PCIE_EVENT_PM_RST_DEAST:
@@ -1180,6 +1202,15 @@
 		break;
 	case EP_PCIE_EVENT_PM_D0:
 		mhi_sm_ctx->stats.d0_event_cnt++;
+
+		spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags);
+		if ((mhi_sm_ctx->mhi_dev->mhi_int) &&
+				(!mhi_sm_ctx->mhi_dev->mhi_int_en)) {
+			enable_irq(mhi_sm_ctx->mhi_dev->mhi_irq);
+			mhi_sm_ctx->mhi_dev->mhi_int_en = true;
+			MHI_SM_DBG("Enable MHI IRQ during D0");
+		}
+		spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags);
 		break;
 	case EP_PCIE_EVENT_LINKDOWN:
 		mhi_sm_ctx->stats.linkdown_event_cnt++;
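
The mhi_sm.c changes above wrap each enable_irq()/disable_irq() call in a check of the new mhi_int_en flag under the device lock, so repeated D0/D3-hot/deassert events never enable or disable the MHI IRQ twice in a row. A minimal sketch of that guarded-toggle pattern, using a pthread mutex in place of the kernel spinlock:

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool irq_enabled;

/* Stand-ins for enable_irq()/disable_irq(). */
static void hw_enable(void)  { printf("enable_irq\n"); }
static void hw_disable(void) { printf("disable_irq\n"); }

/* Only touch the hardware when the cached state actually changes. */
static void set_irq(bool enable)
{
	pthread_mutex_lock(&lock);
	if (enable && !irq_enabled) {
		hw_enable();
		irq_enabled = true;
	} else if (!enable && irq_enabled) {
		hw_disable();
		irq_enabled = false;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_irq(true);	/* D0: enables once */
	set_irq(true);	/* duplicate request: no-op */
	set_irq(false);	/* D3 hot: disables once */
	set_irq(false);	/* duplicate request: no-op */
	return 0;
}
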
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index ed02d0d..f47d0f0 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -29,15 +29,18 @@
 #include <uapi/linux/mhi.h>
 #include "mhi.h"
 
-#define MHI_DEV_NODE_NAME_LEN		13
-#define MHI_MAX_NR_OF_CLIENTS		23
 #define MHI_SOFTWARE_CLIENT_START	0
 #define MHI_SOFTWARE_CLIENT_LIMIT	(MHI_MAX_SOFTWARE_CHANNELS/2)
 #define MHI_UCI_IPC_LOG_PAGES		(100)
 
-#define MAX_NR_TRBS_PER_CHAN		1
+/* Max number of MHI write request structures (used in async writes) */
+#define MAX_UCI_WR_REQ			10
+#define MAX_NR_TRBS_PER_CHAN		9
 #define MHI_QTI_IFACE_ID		4
-#define DEVICE_NAME "mhi"
+#define DEVICE_NAME			"mhi"
+#define MAX_DEVICE_NAME_SIZE		80
+
+#define MHI_UCI_ASYNC_READ_TIMEOUT	msecs_to_jiffies(100)
 
 enum uci_dbg_level {
 	UCI_DBG_VERBOSE = 0x0,
@@ -70,7 +73,158 @@
 	u32 nr_trbs;
 	/* direction of the channel, see enum mhi_chan_dir */
 	enum mhi_chan_dir dir;
-	u32 uci_ownership;
+	/* need to register mhi channel state change callback */
+	bool register_cb;
+	/* Name of char device */
+	char *device_name;
+};
+
+/* UCI channel attributes table */
+static const struct chan_attr uci_chan_attr_table[] = {
+	{
+		MHI_CLIENT_LOOPBACK_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_LOOPBACK_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_SAHARA_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_SAHARA_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_EFS_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_EFS_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_MBIM_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_MBIM_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_QMI_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_QMI_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_0_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_0_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_1_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_IP_CTRL_1_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_DUN_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_DUN_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		false,
+		NULL
+	},
+	{
+		MHI_CLIENT_ADB_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		true,
+		NULL
+	},
+	{
+		MHI_CLIENT_ADB_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		true,
+		"android_adb"
+	},
 };
 
 struct uci_ctrl {
@@ -87,6 +241,8 @@
 	u32 in_chan;
 	struct mhi_dev_client *out_handle;
 	struct mhi_dev_client *in_handle;
+	const struct chan_attr *in_chan_attr;
+	const struct chan_attr *out_chan_attr;
 	wait_queue_head_t read_wq;
 	wait_queue_head_t write_wq;
 	atomic_t read_data_ready;
@@ -101,10 +257,16 @@
 	struct mhi_uci_ctxt_t *uci_ctxt;
 	struct mutex in_chan_lock;
 	struct mutex out_chan_lock;
+	spinlock_t wr_req_lock;
+	unsigned int f_flags;
+	struct mhi_req *wreqs;
+	struct list_head wr_req_list;
+	struct completion read_done;
+	int (*send)(struct uci_client*, void*, u32);
+	int (*read)(struct uci_client*, struct mhi_req*, int*);
 };
 
 struct mhi_uci_ctxt_t {
-	struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS];
 	struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
 	struct uci_ctrl ctrl_handle;
 	void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
@@ -119,6 +281,7 @@
 };
 
 #define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2)
+#define CLIENT_TO_CHAN(_CLIENT_NR) (_CLIENT_NR * 2)
 
 #define uci_log(_msg_lvl, _msg, ...) do { \
 	if (_msg_lvl >= mhi_uci_msg_lvl) { \
@@ -156,7 +319,7 @@
 {
 	int rc = 0;
 	u32 i, j;
-	struct chan_attr *chan_attributes;
+	const struct chan_attr *in_chan_attr;
 	size_t buf_size;
 	void *data_loc;
 
@@ -169,10 +332,18 @@
 		return -EINVAL;
 	}
 
-	chan_attributes = &uci_ctxt.chan_attrib[chan];
-	buf_size = chan_attributes->max_packet_size;
+	in_chan_attr = client_handle->in_chan_attr;
+	if (!in_chan_attr) {
+		uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n",
+				client_handle->in_chan);
+		return -EINVAL;
+	}
 
-	for (i = 0; i < (chan_attributes->nr_trbs); i++) {
+	/* Init the completion event for read */
+	init_completion(&client_handle->read_done);
+
+	buf_size = in_chan_attr->max_packet_size;
+	for (i = 0; i < (in_chan_attr->nr_trbs); i++) {
 		data_loc = kmalloc(buf_size, GFP_KERNEL);
 		if (!data_loc) {
 			rc = -ENOMEM;
@@ -191,48 +362,129 @@
 	return rc;
 }
 
-static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf,
-		u32 size, u32 is_uspace_buf)
+static void mhi_uci_write_completion_cb(void *req)
 {
-	void *data_loc = NULL;
-	uintptr_t memcpy_result = 0;
-	u32 data_inserted_so_far = 0;
+	struct mhi_req *ureq = req;
 	struct uci_client *uci_handle;
+	unsigned long flags;
+
+	uci_handle = (struct uci_client *)ureq->context;
+	kfree(ureq->buf);
+	ureq->buf = NULL;
+
+	spin_lock_irqsave(&uci_handle->wr_req_lock, flags);
+	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
+	spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags);
+}
+
+static void mhi_uci_read_completion_cb(void *req)
+{
+	struct mhi_req *ureq = req;
+	struct uci_client *uci_handle;
+
+	uci_handle = (struct uci_client *)ureq->context;
+	complete(&uci_handle->read_done);
+}
+
+static int mhi_uci_send_sync(struct uci_client *uci_handle,
+			void *data_loc, u32 size)
+{
 	struct mhi_req ureq;
+	int ret_val;
 
-
-	uci_handle = container_of(client_handle, struct uci_client,
-					out_handle);
-
-	if (!client_handle || !buf ||
-		!size || !uci_handle)
-		return -EINVAL;
-
-	if (is_uspace_buf) {
-		data_loc = kmalloc(size, GFP_KERNEL);
-		if (!data_loc) {
-			uci_log(UCI_DBG_ERROR,
-				"Failed to allocate memory 0x%x\n",
-				size);
-			return -ENOMEM;
-		}
-		memcpy_result = copy_from_user(data_loc, buf, size);
-		if (memcpy_result)
-			goto error_memcpy;
-	} else {
-		data_loc = buf;
-	}
-	ureq.client = *client_handle;
+	ureq.client = uci_handle->out_handle;
 	ureq.buf = data_loc;
 	ureq.len = size;
 	ureq.chan = uci_handle->out_chan;
 	ureq.mode = IPA_DMA_SYNC;
 
-	data_inserted_so_far = mhi_dev_write_channel(&ureq);
+	ret_val = mhi_dev_write_channel(&ureq);
+
+	kfree(data_loc);
+	return ret_val;
+}
+
+static int mhi_uci_send_async(struct uci_client *uci_handle,
+			void *data_loc, u32 size)
+{
+	int bytes_to_write;
+	struct mhi_req *ureq;
+
+	uci_log(UCI_DBG_VERBOSE,
+		"Got async write for ch %d of size %d\n",
+		uci_handle->out_chan, size);
+
+	spin_lock_irq(&uci_handle->wr_req_lock);
+	if (list_empty(&uci_handle->wr_req_list)) {
+		uci_log(UCI_DBG_ERROR, "Write request pool empty\n");
+		spin_unlock_irq(&uci_handle->wr_req_lock);
+		return -ENOMEM;
+	}
+	ureq = container_of(uci_handle->wr_req_list.next,
+						struct mhi_req, list);
+	list_del_init(&ureq->list);
+	spin_unlock_irq(&uci_handle->wr_req_lock);
+
+	ureq->client = uci_handle->out_handle;
+	ureq->context = uci_handle;
+	ureq->buf = data_loc;
+	ureq->len = size;
+	ureq->chan = uci_handle->out_chan;
+	ureq->mode = IPA_DMA_ASYNC;
+	ureq->client_cb = mhi_uci_write_completion_cb;
+	ureq->snd_cmpl = 1;
+
+	bytes_to_write = mhi_dev_write_channel(ureq);
+	if (bytes_to_write != size)
+		goto error_async_transfer;
+
+	return bytes_to_write;
+
+error_async_transfer:
+	kfree(data_loc);
+	ureq->buf = NULL;
+	spin_lock_irq(&uci_handle->wr_req_lock);
+	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
+	spin_unlock_irq(&uci_handle->wr_req_lock);
+
+	return bytes_to_write;
+}
+
+static int mhi_uci_send_packet(struct mhi_dev_client **client_handle,
+		const char __user *buf, u32 size)
+{
+	void *data_loc;
+	unsigned long memcpy_result;
+	struct uci_client *uci_handle;
+
+	if (!client_handle || !buf || !size)
+		return -EINVAL;
+
+	if (size > TRB_MAX_DATA_SIZE) {
+		uci_log(UCI_DBG_ERROR,
+			"Too big write size: %d, max supported size is %d\n",
+			size, TRB_MAX_DATA_SIZE);
+		return -EFBIG;
+	}
+
+	uci_handle = container_of(client_handle, struct uci_client,
+					out_handle);
+	data_loc = kmalloc(size, GFP_KERNEL);
+	if (!data_loc) {
+		uci_log(UCI_DBG_ERROR,
+		"Failed to allocate kernel buf for user requested size 0x%x\n",
+			size);
+		return -ENOMEM;
+	}
+	memcpy_result = copy_from_user(data_loc, buf, size);
+	if (memcpy_result)
+		goto error_memcpy;
+
+	return uci_handle->send(uci_handle, data_loc, size);
 
 error_memcpy:
 	kfree(data_loc);
-	return data_inserted_so_far;
+	return -EFAULT;
 }
 
 static unsigned int mhi_uci_ctrl_poll(struct file *file, poll_table *wait)
@@ -290,6 +542,119 @@
 	return mask;
 }
 
+static int mhi_uci_alloc_write_reqs(struct uci_client *client)
+{
+	int i;
+
+	client->wreqs = kcalloc(MAX_UCI_WR_REQ,
+				sizeof(struct mhi_req),
+				GFP_KERNEL);
+	if (!client->wreqs) {
+		uci_log(UCI_DBG_ERROR, "Write reqs alloc failed\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&client->wr_req_list);
+	for (i = 0; i < MAX_UCI_WR_REQ; ++i)
+		list_add_tail(&client->wreqs[i].list, &client->wr_req_list);
+
+	uci_log(UCI_DBG_INFO,
+		"UCI write reqs allocation successful\n");
+	return 0;
+}
+
+static int mhi_uci_read_async(struct uci_client *uci_handle,
+			struct mhi_req *ureq, int *bytes_avail)
+{
+	int ret_val = 0;
+	unsigned long compl_ret;
+
+	uci_log(UCI_DBG_VERBOSE,
+		"Async read for ch %d\n", uci_handle->in_chan);
+
+	ureq->mode = IPA_DMA_ASYNC;
+	ureq->client_cb = mhi_uci_read_completion_cb;
+	ureq->snd_cmpl = 1;
+	ureq->context = uci_handle;
+
+	reinit_completion(&uci_handle->read_done);
+
+	*bytes_avail = mhi_dev_read_channel(ureq);
+	uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n",
+		ureq->len, *bytes_avail);
+	if (*bytes_avail < 0) {
+		uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
+			*bytes_avail);
+		return -EIO;
+	}
+
+	if (*bytes_avail > 0) {
+		uci_log(UCI_DBG_VERBOSE,
+			"Waiting for async read completion!\n");
+		compl_ret =
+			wait_for_completion_interruptible_timeout(
+			&uci_handle->read_done,
+			MHI_UCI_ASYNC_READ_TIMEOUT);
+
+		if (compl_ret == -ERESTARTSYS) {
+			uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
+			return compl_ret;
+		} else if (compl_ret == 0) {
+			uci_log(UCI_DBG_ERROR, "Read timed out for ch %d\n",
+				uci_handle->in_chan);
+			return -EIO;
+		}
+		uci_log(UCI_DBG_VERBOSE,
+			"wk up Read completed on ch %d\n", ureq->chan);
+
+		uci_handle->pkt_loc = (void *)ureq->buf;
+		uci_handle->pkt_size = ureq->actual_len;
+
+		uci_log(UCI_DBG_VERBOSE,
+			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
+			uci_handle->pkt_size,
+			ureq->buf, ureq->chan);
+	} else {
+		uci_handle->pkt_loc = NULL;
+		uci_handle->pkt_size = 0;
+	}
+
+	return ret_val;
+}
+
+static int mhi_uci_read_sync(struct uci_client *uci_handle,
+			struct mhi_req *ureq, int *bytes_avail)
+{
+	int ret_val = 0;
+
+	ureq->mode = IPA_DMA_SYNC;
+	*bytes_avail = mhi_dev_read_channel(ureq);
+
+	uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n",
+		ureq->len, *bytes_avail);
+
+	if (*bytes_avail < 0) {
+		uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n",
+			*bytes_avail);
+		return -EIO;
+	}
+
+	if (*bytes_avail > 0) {
+		uci_handle->pkt_loc = (void *)ureq->buf;
+		uci_handle->pkt_size = ureq->actual_len;
+
+		uci_log(UCI_DBG_VERBOSE,
+			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
+			uci_handle->pkt_size,
+			ureq->buf, ureq->chan);
+	} else {
+		uci_handle->pkt_loc = NULL;
+		uci_handle->pkt_size = 0;
+	}
+
+	return ret_val;
+}
+
 static int open_client_mhi_channels(struct uci_client *uci_client)
 {
 	int rc = 0;
@@ -300,16 +665,27 @@
 			uci_client->in_chan);
 	mutex_lock(&uci_client->out_chan_lock);
 	mutex_lock(&uci_client->in_chan_lock);
+
+	/* Allocate write requests for async operations */
+	if (!(uci_client->f_flags & O_SYNC)) {
+		rc = mhi_uci_alloc_write_reqs(uci_client);
+		if (rc)
+			goto handle_not_rdy_err;
+		uci_client->send = mhi_uci_send_async;
+		uci_client->read = mhi_uci_read_async;
+	} else {
+		uci_client->send = mhi_uci_send_sync;
+		uci_client->read = mhi_uci_read_sync;
+	}
+
 	uci_log(UCI_DBG_DBG,
 			"Initializing inbound chan %d.\n",
 			uci_client->in_chan);
-
 	rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
-	if (rc < 0) {
+	if (rc < 0)
 		uci_log(UCI_DBG_ERROR,
 			"Failed to init inbound 0x%x, ret 0x%x\n",
 			uci_client->in_chan, rc);
-	}
 
 	rc = mhi_dev_open_channel(uci_client->out_chan,
 			&uci_client->out_handle,
@@ -320,7 +696,6 @@
 	rc = mhi_dev_open_channel(uci_client->in_chan,
 			&uci_client->in_handle,
 			uci_ctxt.event_notifier);
-
 	if (rc < 0) {
 		uci_log(UCI_DBG_ERROR,
 			"Failed to open chan %d, ret 0x%x\n",
@@ -375,6 +750,7 @@
 			return -ENOMEM;
 		}
 		uci_handle->uci_ctxt = &uci_ctxt;
+		uci_handle->f_flags = file_handle->f_flags;
 		if (!atomic_read(&uci_handle->mhi_chans_open)) {
 			uci_log(UCI_DBG_INFO,
 				"Opening channels client %d\n",
@@ -397,20 +773,11 @@
 		struct file *file_handle)
 {
 	struct uci_client *uci_handle = file_handle->private_data;
-	struct mhi_uci_ctxt_t *uci_ctxt;
-	u32 nr_in_bufs = 0;
 	int rc = 0;
-	int in_chan = 0;
-	u32 buf_size = 0;
 
 	if (!uci_handle)
 		return -EINVAL;
 
-	uci_ctxt = uci_handle->uci_ctxt;
-	in_chan = iminor(mhi_inode) + 1;
-	nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
-	buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size;
-
 	if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
 		uci_log(UCI_DBG_DBG,
 				"Last client left, closing channel 0x%x\n",
@@ -418,6 +785,8 @@
 		if (atomic_read(&uci_handle->mhi_chans_open)) {
 			atomic_set(&uci_handle->mhi_chans_open, 0);
 
+			if (!(uci_handle->f_flags & O_SYNC))
+				kfree(uci_handle->wreqs);
 			mutex_lock(&uci_handle->out_chan_lock);
 			rc = mhi_dev_close_channel(uci_handle->out_handle);
 			wake_up(&uci_handle->write_wq);
@@ -553,7 +922,6 @@
 	struct mutex *mutex;
 	ssize_t bytes_copied = 0;
 	u32 addr_offset = 0;
-	void *local_buf = NULL;
 	struct mhi_req ureq;
 
 	if (!file || !ubuf || !uspace_buf_size ||
@@ -569,44 +937,19 @@
 	ureq.client = client_handle;
 	ureq.buf = uci_handle->in_buf_list[0].addr;
 	ureq.len = uci_handle->in_buf_list[0].buf_size;
-	ureq.mode = IPA_DMA_SYNC;
+
 
 	uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n",
 			ureq.chan);
 	do {
 		if (!uci_handle->pkt_loc &&
-				!atomic_read(&uci_ctxt.mhi_disabled)) {
-
-			bytes_avail = mhi_dev_read_channel(&ureq);
-
-			uci_log(UCI_DBG_VERBOSE,
-				"reading from mhi_core local_buf = %p",
-				local_buf);
-			uci_log(UCI_DBG_VERBOSE,
-					"buf_size = 0x%x bytes_read = 0x%x\n",
-					 ureq.len, bytes_avail);
-
-			if (bytes_avail < 0) {
-				uci_log(UCI_DBG_ERROR,
-				"Failed to read channel ret %d\n",
-					bytes_avail);
-				ret_val =  -EIO;
+			!atomic_read(&uci_ctxt.mhi_disabled)) {
+			ret_val = uci_handle->read(uci_handle, &ureq,
+							&bytes_avail);
+			if (ret_val)
 				goto error;
-			}
-
-			if (bytes_avail > 0) {
-				uci_handle->pkt_loc = (void *) ureq.buf;
-				uci_handle->pkt_size = ureq.actual_len;
-
+			if (bytes_avail > 0)
 				*bytes_pending = (loff_t)uci_handle->pkt_size;
-				uci_log(UCI_DBG_VERBOSE,
-					"Got pkt of sz 0x%x at adr %p, ch %d\n",
-					uci_handle->pkt_size,
-					ureq.buf, ureq.chan);
-			} else {
-				uci_handle->pkt_loc = 0;
-				uci_handle->pkt_size = 0;
-			}
 		}
 		if (bytes_avail == 0) {
 
@@ -615,7 +958,10 @@
 				"No data read_data_ready %d, chan %d\n",
 				atomic_read(&uci_handle->read_data_ready),
 				ureq.chan);
-
+			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) {
+				ret_val = -EAGAIN;
+				goto error;
+			}
 			ret_val = wait_event_interruptible(uci_handle->read_wq,
 				(!mhi_dev_channel_isempty(client_handle)));
 
@@ -719,10 +1065,10 @@
 	mutex_lock(&uci_handle->out_chan_lock);
 	while (!ret_val) {
 		ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
-				(void *)buf, count, 1);
+						buf, count);
 		if (ret_val < 0) {
 			uci_log(UCI_DBG_ERROR,
-				"Error while writing data to MHI, chan %d, buf %p, size %d\n",
+				"Error while writing data to MHI, chan %d, buf %pK, size %d\n",
 				chan, (void *)buf, count);
 			ret_val = -EIO;
 			break;
@@ -732,6 +1078,8 @@
 				"No descriptors available, did we poll, chan %d?\n",
 				chan);
 			mutex_unlock(&uci_handle->out_chan_lock);
+			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
+				return -EAGAIN;
 			ret_val = wait_event_interruptible(uci_handle->write_wq,
 				!mhi_dev_channel_isempty(
 					uci_handle->out_handle));
@@ -748,60 +1096,6 @@
 	return ret_val;
 }
 
-static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
-{
-	u32 i = 0;
-	u32 data_size = TRB_MAX_DATA_SIZE;
-	u32 index = 0;
-	struct uci_client *client;
-	struct chan_attr *chan_attrib = NULL;
-
-	for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) {
-		chan_attrib = &uci_ctxt->chan_attrib[i];
-		switch (i) {
-		case MHI_CLIENT_LOOPBACK_OUT:
-		case MHI_CLIENT_LOOPBACK_IN:
-		case MHI_CLIENT_SAHARA_OUT:
-		case MHI_CLIENT_SAHARA_IN:
-		case MHI_CLIENT_EFS_OUT:
-		case MHI_CLIENT_EFS_IN:
-		case MHI_CLIENT_MBIM_OUT:
-		case MHI_CLIENT_MBIM_IN:
-		case MHI_CLIENT_QMI_OUT:
-		case MHI_CLIENT_QMI_IN:
-		case MHI_CLIENT_IP_CTRL_0_OUT:
-		case MHI_CLIENT_IP_CTRL_0_IN:
-		case MHI_CLIENT_IP_CTRL_1_OUT:
-		case MHI_CLIENT_IP_CTRL_1_IN:
-		case MHI_CLIENT_DUN_OUT:
-		case MHI_CLIENT_DUN_IN:
-			chan_attrib->uci_ownership = 1;
-			break;
-		default:
-			chan_attrib->uci_ownership = 0;
-			break;
-		}
-		if (chan_attrib->uci_ownership) {
-			chan_attrib->chan_id = i;
-			chan_attrib->max_packet_size = data_size;
-			index = CHAN_TO_CLIENT(i);
-			client = &uci_ctxt->client_handles[index];
-			chan_attrib->nr_trbs = 9;
-			client->in_buf_list =
-			      kmalloc(sizeof(struct mhi_dev_iov) *
-					      chan_attrib->nr_trbs,
-					GFP_KERNEL);
-			if (client->in_buf_list == NULL)
-				return -ENOMEM;
-		}
-		if (i % 2 == 0)
-			chan_attrib->dir = MHI_DIR_OUT;
-		else
-			chan_attrib->dir = MHI_DIR_IN;
-	}
-	return 0;
-}
-
 void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason)
 {
 	struct uci_ctrl *uci_ctrl_handle = NULL;
@@ -852,6 +1146,7 @@
 
 	mutex_init(&mhi_client->in_chan_lock);
 	mutex_init(&mhi_client->out_chan_lock);
+	spin_lock_init(&mhi_client->wr_req_lock);
 
 	uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
 	return 0;
@@ -921,6 +1216,109 @@
 #endif
 };
 
+static int uci_device_create(struct uci_client *client)
+{
+	int r;
+	int n;
+	ssize_t dst_size;
+	unsigned int client_index;
+	static char device_name[MAX_DEVICE_NAME_SIZE];
+
+	client_index = CHAN_TO_CLIENT(client->out_chan);
+	if (uci_ctxt.client_handles[client_index].dev)
+		return -EEXIST;
+
+	cdev_init(&uci_ctxt.cdev[client_index], &mhi_uci_client_fops);
+	uci_ctxt.cdev[client_index].owner = THIS_MODULE;
+	r = cdev_add(&uci_ctxt.cdev[client_index],
+		uci_ctxt.start_ctrl_nr + client_index, 1);
+	if (IS_ERR_VALUE(r)) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to add cdev for client %d, ret 0x%x\n",
+			client_index, r);
+		return r;
+	}
+	if (!client->in_chan_attr->device_name) {
+		n = snprintf(device_name, sizeof(device_name),
+			DEVICE_NAME "_pipe_%d", CLIENT_TO_CHAN(client_index));
+		if (n >= sizeof(device_name)) {
+			uci_log(UCI_DBG_ERROR, "Device name buf too short\n");
+			r = -E2BIG;
+			goto error;
+		}
+	} else {
+		dst_size = strscpy(device_name,
+				client->in_chan_attr->device_name,
+				sizeof(device_name));
+		if (dst_size <= 0) {
+			uci_log(UCI_DBG_ERROR, "Device name buf too short\n");
+			r = dst_size;
+			goto error;
+		}
+	}
+
+	uci_ctxt.client_handles[client_index].dev =
+		device_create(uci_ctxt.mhi_uci_class, NULL,
+				uci_ctxt.start_ctrl_nr + client_index,
+				NULL, device_name);
+	if (IS_ERR(uci_ctxt.client_handles[client_index].dev)) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to create device for client %d\n",
+			client_index);
+		r = -EIO;
+		goto error;
+	}
+
+	uci_log(UCI_DBG_INFO,
+		"Created device with class 0x%pK and ctrl number %d\n",
+		uci_ctxt.mhi_uci_class,
+		uci_ctxt.start_ctrl_nr + client_index);
+
+	return 0;
+
+error:
+	cdev_del(&uci_ctxt.cdev[client_index]);
+	return r;
+}
+
+static void mhi_uci_client_cb(struct mhi_dev_client_cb_data *cb_data)
+{
+	struct uci_client *client = cb_data->user_data;
+
+	uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for channel %d, state %d\n",
+		cb_data->channel, cb_data->ctrl_info);
+
+	if (cb_data->ctrl_info == MHI_STATE_CONNECTED)
+		uci_device_create(client);
+}
+
+static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
+{
+	u32 i;
+	u32 index;
+	struct uci_client *client;
+	const struct chan_attr *chan_attrib;
+
+	for (i = 0; i < ARRAY_SIZE(uci_chan_attr_table); i += 2) {
+		chan_attrib = &uci_chan_attr_table[i];
+		index = CHAN_TO_CLIENT(i);
+		client = &uci_ctxt->client_handles[index];
+		client->out_chan_attr = chan_attrib;
+		client->in_chan_attr = ++chan_attrib;
+		client->in_buf_list =
+			kcalloc(chan_attrib->nr_trbs,
+			sizeof(struct mhi_dev_iov),
+			GFP_KERNEL);
+		if (!client->in_buf_list)
+			return -ENOMEM;
+		/* Register callback with MHI if requested */
+		if (client->out_chan_attr->register_cb)
+			mhi_register_state_cb(mhi_uci_client_cb, client,
+						client->out_chan);
+	}
+	return 0;
+}
+
 int mhi_uci_init(void)
 {
 	u32 i = 0;
@@ -949,16 +1347,14 @@
 	uci_log(UCI_DBG_INFO, "Registering for MHI events.\n");
 
 	for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
-		if (uci_ctxt.chan_attrib[i * 2].uci_ownership) {
-			mhi_client = &uci_ctxt.client_handles[i];
-
-			r = mhi_register_client(mhi_client, i);
-
-			if (r) {
-				uci_log(UCI_DBG_CRITICAL,
-					"Failed to reg client %d ret %d\n",
-					r, i);
-			}
+		mhi_client = &uci_ctxt.client_handles[i];
+		if (!mhi_client->in_chan_attr)
+			continue;
+		r = mhi_register_client(mhi_client, i);
+		if (r) {
+			uci_log(UCI_DBG_CRITICAL,
+				"Failed to reg client %d ret %d\n",
+				r, i);
 		}
 	}
 
@@ -992,30 +1388,19 @@
 
 	uci_log(UCI_DBG_INFO, "Setting up device nodes.\n");
 	for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
-		if (uci_ctxt.chan_attrib[i*2].uci_ownership) {
-			cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
-			uci_ctxt.cdev[i].owner = THIS_MODULE;
-			r = cdev_add(&uci_ctxt.cdev[i],
-					uci_ctxt.start_ctrl_nr + i, 1);
-			if (IS_ERR_VALUE(r)) {
-				uci_log(UCI_DBG_ERROR,
-					"Failed to add cdev %d, ret 0x%x\n",
-					i, r);
-				goto failed_char_add;
-			}
-
-			uci_ctxt.client_handles[i].dev =
-				device_create(uci_ctxt.mhi_uci_class, NULL,
-						uci_ctxt.start_ctrl_nr + i,
-						NULL, DEVICE_NAME "_pipe_%d",
-						i * 2);
-			if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
-				uci_log(UCI_DBG_ERROR,
-						"Failed to add cdev %d\n", i);
-				cdev_del(&uci_ctxt.cdev[i]);
-				goto failed_device_create;
-			}
-		}
+		mhi_client = &uci_ctxt.client_handles[i];
+		if (!mhi_client->in_chan_attr)
+			continue;
+		/*
+		 * Delay device node creation until the callback for
+		 * this client's channels is called by the MHI driver,
+		 * if one is registered.
+		 */
+		if (mhi_client->in_chan_attr->register_cb)
+			continue;
+		ret_val = uci_device_create(mhi_client);
+		if (ret_val)
+			goto failed_device_create;
 	}
 
 	/* Control node */
@@ -1052,7 +1437,6 @@
 
 	return 0;
 
-failed_char_add:
 failed_device_create:
 	while (--i >= 0) {
 		cdev_del(&uci_ctxt.cdev[i]);
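
A note on the mhi_uci.c changes above: the client now selects its transfer handlers once, at open() time, and dispatches all later transfers through the stored function pointers (uci_client->send/->read), with O_SYNC picking the blocking path. The following standalone C sketch illustrates only that dispatch pattern; the struct and function names are illustrative, not the driver's real symbols.

/* Sketch of flag-based handler selection, modeled on uci_client->send.
 * All names here are illustrative, not the driver's actual API.
 */
#include <stdio.h>
#include <fcntl.h>

struct client {
	int (*send)(struct client *c, const void *buf, unsigned int len);
};

static int send_sync(struct client *c, const void *buf, unsigned int len)
{
	(void)c; (void)buf;
	printf("sync send of %u bytes (blocks until the transfer completes)\n", len);
	return (int)len;
}

static int send_async(struct client *c, const void *buf, unsigned int len)
{
	(void)c; (void)buf;
	printf("async send of %u bytes (completion callback recycles the request)\n", len);
	return (int)len;
}

static void client_open(struct client *c, int f_flags)
{
	/* O_SYNC selects the blocking path, mirroring open_client_mhi_channels() */
	c->send = (f_flags & O_SYNC) ? send_sync : send_async;
}

int main(void)
{
	struct client c;

	client_open(&c, O_SYNC);
	c.send(&c, "x", 1);
	client_open(&c, 0);
	c.send(&c, "x", 1);
	return 0;
}
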
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 9374bc8..aafb5c8 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1119,7 +1119,7 @@
 					&ctx->usb_bam_connections[idx];
 	unsigned long peer_bam_handle;
 
-	ret = sps_phy2h(pipe_connect->dst_phy_addr, &peer_bam_handle);
+	ret = sps_phy2h(pipe_connect->src_phy_addr, &peer_bam_handle);
 	if (ret) {
 		log_event_err("%s: sps_phy2h failed (src BAM) %d\n",
 				__func__, ret);
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 1871602..c098328 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -145,8 +145,10 @@
 {
 	struct asus_wireless_data *data = acpi_driver_data(adev);
 
-	if (data->wq)
+	if (data->wq) {
+		devm_led_classdev_unregister(&adev->dev, &data->led);
 		destroy_workqueue(data->wq);
+	}
 	return 0;
 }
 
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 73e2f0b..c4770a9 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1569,6 +1569,11 @@
 		acpi_id =
 			acpi_match_device(client->dev.driver->acpi_match_table,
 					  &client->dev);
+		if (!acpi_id) {
+			dev_err(&client->dev, "failed to match device name\n");
+			ret = -ENODEV;
+			goto error_1;
+		}
 		name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
 	}
 	if (!name) {
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 3dd1722..8281c41 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -331,6 +331,14 @@
 	POWER_SUPPLY_ATTR(allow_hvdcp3),
 	POWER_SUPPLY_ATTR(hvdcp_opti_allowed),
 	POWER_SUPPLY_ATTR(max_pulse_allowed),
+	POWER_SUPPLY_ATTR(ignore_false_negative_isense),
+	POWER_SUPPLY_ATTR(battery_info),
+	POWER_SUPPLY_ATTR(battery_info_id),
+	POWER_SUPPLY_ATTR(enable_jeita_detection),
+	POWER_SUPPLY_ATTR(esr_actual),
+	POWER_SUPPLY_ATTR(esr_nominal),
+	POWER_SUPPLY_ATTR(soh),
+	POWER_SUPPLY_ATTR(qc_opti_disable),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 275b982..e8d91ae 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -78,6 +78,7 @@
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
+	bool			pl_disable;
 };
 
 struct pl_data *the_chip;
@@ -88,6 +89,7 @@
 
 enum {
 	AICL_RERUN_WA_BIT	= BIT(0),
+	FORCE_INOV_DISABLE_BIT	= BIT(1),
 };
 
 static int debug_mask;
@@ -848,6 +850,12 @@
 		chip->pl_settled_ua = 0;
 	}
 
+	/* notify parallel state change */
+	if (chip->pl_psy && (chip->pl_disable != pl_disable)) {
+		power_supply_changed(chip->pl_psy);
+		chip->pl_disable = (bool)pl_disable;
+	}
+
 	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
 		   pl_disable ? "disabled" : "enabled");
 
@@ -916,7 +924,8 @@
 	chip->pl_mode = pval.intval;
 
 	/* Disable autonomous voltage increments for USBIN-USBIN */
-	if (IS_USBIN(chip->pl_mode)) {
+	if (IS_USBIN(chip->pl_mode)
+			&& (chip->wa_flags & FORCE_INOV_DISABLE_BIT)) {
 		if (!chip->hvdcp_hw_inov_dis_votable)
 			chip->hvdcp_hw_inov_dis_votable =
 					find_votable("HVDCP_HW_INOV_DIS");
@@ -1041,7 +1050,6 @@
 	else
 		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
 
-	rerun_election(chip->fcc_votable);
 
 	if (IS_USBIN(chip->pl_mode)) {
 		/*
@@ -1200,7 +1208,9 @@
 	switch (smb_version) {
 	case PMI8998_SUBTYPE:
 	case PM660_SUBTYPE:
-		chip->wa_flags = AICL_RERUN_WA_BIT;
+		chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
+		break;
+	case PMI632_SUBTYPE:
 		break;
 	default:
 		break;
@@ -1301,6 +1311,7 @@
 		goto unreg_notifier;
 	}
 
+	chip->pl_disable = true;
 	chip->qcom_batt_class.name = "qcom-battery",
 	chip->qcom_batt_class.owner = THIS_MODULE,
 	chip->qcom_batt_class.class_attrs = pl_attributes;
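
The battery.c hunk above caches pl_disable in the chip structure and calls power_supply_changed() only when the parallel-charging state actually flips. A minimal standalone sketch of that notify-on-change pattern follows; the names and the notify stub are illustrative, not the driver's API.

/* Sketch of the "notify only on change" pattern used for pl_disable above. */
#include <stdbool.h>
#include <stdio.h>

struct pl_state {
	bool pl_disable;	/* last state that listeners were told about */
};

static void notify_listeners(void)
{
	printf("parallel state changed, notifying listeners\n");
}

static void set_parallel_disable(struct pl_state *s, bool disable)
{
	/* fire a notification only when the state actually flips */
	if (s->pl_disable != disable) {
		notify_listeners();
		s->pl_disable = disable;
	}
}

int main(void)
{
	struct pl_state s = { .pl_disable = true };

	set_parallel_disable(&s, true);	 /* no change, no notification */
	set_parallel_disable(&s, false); /* flips, notifies once */
	set_parallel_disable(&s, false); /* no change again */
	return 0;
}
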
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 9b9a880..f3f2c66 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -16,11 +16,38 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
 #include "fg-alg.h"
 
 #define FULL_SOC_RAW		255
+#define FULL_BATT_SOC		GENMASK(31, 0)
 #define CAPACITY_DELTA_DECIPCT	500
 
+#define CENTI_ICORRECT_C0	105
+#define CENTI_ICORRECT_C1	20
+
+#define HOURS_TO_SECONDS	3600
+#define OCV_SLOPE_UV		10869
+#define MILLI_UNIT		1000
+#define MICRO_UNIT		1000000
+#define NANO_UNIT		1000000000
+
+#define DEFAULT_TTF_RUN_PERIOD_MS	10000
+#define DEFAULT_TTF_ITERM_DELTA_MA	200
+
+static const struct ttf_pt ttf_ln_table[] = {
+	{ 1000,		0 },
+	{ 2000,		693 },
+	{ 4000,		1386 },
+	{ 6000,		1792 },
+	{ 8000,		2079 },
+	{ 16000,	2773 },
+	{ 32000,	3466 },
+	{ 64000,	4159 },
+	{ 128000,	4852 },
+};
+
 /* Cycle counter APIs */
 
 /**
@@ -351,7 +378,7 @@
  */
 static int cap_learning_process_full_data(struct cap_learning *cl)
 {
-	int rc, cc_soc_sw, cc_soc_delta_pct;
+	int rc, cc_soc_sw, cc_soc_delta_centi_pct;
 	int64_t delta_cap_uah;
 
 	rc = cl->get_cc_soc(cl->data, &cc_soc_sw);
@@ -360,20 +387,21 @@
 		return rc;
 	}
 
-	cc_soc_delta_pct =
-		div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 100,
+	cc_soc_delta_centi_pct =
+		div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 10000,
 			cl->cc_soc_max);
 
 	/* If the delta is < 50%, then skip processing full data */
-	if (cc_soc_delta_pct < 50) {
-		pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+	if (cc_soc_delta_centi_pct < 5000) {
+		pr_err("cc_soc_delta_centi_pct: %d\n", cc_soc_delta_centi_pct);
 		return -ERANGE;
 	}
 
-	delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_pct, 100);
+	delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_centi_pct,
+				 10000);
 	cl->final_cap_uah = cl->init_cap_uah + delta_cap_uah;
-	pr_debug("Current cc_soc=%d cc_soc_delta_pct=%d total_cap_uah=%lld\n",
-		cc_soc_sw, cc_soc_delta_pct, cl->final_cap_uah);
+	pr_debug("Current cc_soc=%d cc_soc_delta_centi_pct=%d total_cap_uah=%lld\n",
+		cc_soc_sw, cc_soc_delta_centi_pct, cl->final_cap_uah);
 	return 0;
 }
 
@@ -401,8 +429,8 @@
 		return -EINVAL;
 	}
 
-	cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc_msb,
-					FULL_SOC_RAW);
+	cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc,
+					FULL_BATT_SOC);
 
 	if (cl->prime_cc_soc) {
 		/*
@@ -668,3 +696,557 @@
 	mutex_init(&cl->lock);
 	return 0;
 }
+
+/* Time to full/empty algorithm  helper functions */
+
+static void ttf_circ_buf_add(struct ttf_circ_buf *buf, int val)
+{
+	buf->arr[buf->head] = val;
+	buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+	buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+static void ttf_circ_buf_clr(struct ttf_circ_buf *buf)
+{
+	buf->size = 0;
+	buf->head = 0;
+	memset(buf->arr, 0, sizeof(buf->arr));
+}
+
+static int cmp_int(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+static int ttf_circ_buf_median(struct ttf_circ_buf *buf, int *median)
+{
+	int *temp;
+
+	if (buf->size == 0)
+		return -ENODATA;
+
+	if (buf->size == 1) {
+		*median = buf->arr[0];
+		return 0;
+	}
+
+	temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+	sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+	if (buf->size % 2)
+		*median = temp[buf->size / 2];
+	else
+		*median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+	kfree(temp);
+	return 0;
+}
+
+static int ttf_lerp(const struct ttf_pt *pts, size_t tablesize,
+						s32 input, s32 *output)
+{
+	int i;
+	s64 temp;
+
+	if (pts == NULL) {
+		pr_err("Table is NULL\n");
+		return -EINVAL;
+	}
+
+	if (tablesize < 1) {
+		pr_err("Table has no entries\n");
+		return -ENOENT;
+	}
+
+	if (tablesize == 1) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (pts[0].x > pts[1].x) {
+		pr_err("Table is not in acending order\n");
+		return -EINVAL;
+	}
+
+	if (input <= pts[0].x) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (input >= pts[tablesize - 1].x) {
+		*output = pts[tablesize - 1].y;
+		return 0;
+	}
+
+	for (i = 1; i < tablesize; i++) {
+		if (input >= pts[i].x)
+			continue;
+
+		temp = ((s64)pts[i].y - pts[i - 1].y) *
+						((s64)input - pts[i - 1].x);
+		temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+		*output = temp + pts[i - 1].y;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int get_time_to_full_locked(struct ttf *ttf, int *val)
+{
+	int rc, ibatt_avg, vbatt_avg, rbatt = 0, msoc = 0, act_cap_mah = 0,
+		i_cc2cv = 0, soc_cc2cv, tau, divisor, iterm = 0, ttf_mode = 0,
+		i, soc_per_step, msoc_this_step, msoc_next_step,
+		ibatt_this_step, t_predicted_this_step, ttf_slope,
+		t_predicted_cv, t_predicted = 0, charge_type = 0,
+		float_volt_uv = 0, valid = 0, charge_status = 0;
+	s64 delta_ms;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_VALID, &valid);
+	if (rc < 0) {
+		pr_err("failed to get ttf_valid rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!valid) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge-status rc=%d\n", rc);
+		return rc;
+	}
+
+	if (charge_status != POWER_SUPPLY_STATUS_CHARGING) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("TTF: msoc=%d\n", msoc);
+
+	/* the battery is considered full if the SOC is 100% */
+	if (msoc >= 100) {
+		*val = 0;
+		return 0;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MODE, &ttf_mode);
+
+	/* when switching TTF algorithms the TTF needs to be reset */
+	if (ttf->mode != ttf_mode) {
+		ttf_circ_buf_clr(&ttf->ibatt);
+		ttf_circ_buf_clr(&ttf->vbatt);
+		ttf->last_ttf = 0;
+		ttf->last_ms = 0;
+		ttf->mode = ttf_mode;
+	}
+
+	/* at least 10 samples are required to produce a stable IBATT */
+	if (ttf->ibatt.size < MAX_TTF_SAMPLES) {
+		*val = -1;
+		return 0;
+	}
+
+	rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg);
+	if (rc < 0) {
+		pr_err("failed to get IBATT AVG rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ttf_circ_buf_median(&ttf->vbatt, &vbatt_avg);
+	if (rc < 0) {
+		pr_err("failed to get VBATT AVG rc=%d\n", rc);
+		return rc;
+	}
+
+	ibatt_avg = -ibatt_avg / MILLI_UNIT;
+	vbatt_avg /= MILLI_UNIT;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_ITERM, &iterm);
+	if (rc < 0) {
+		pr_err("failed to get iterm rc=%d\n", rc);
+		return rc;
+	}
+	/* clamp ibatt_avg to iterm */
+	if (ibatt_avg < abs(iterm))
+		ibatt_avg = abs(iterm);
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_RBATT, &rbatt);
+	if (rc < 0) {
+		pr_err("failed to get battery resistance rc=%d\n", rc);
+		return rc;
+	}
+	rbatt /= MILLI_UNIT;
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_FCC, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug(" TTF: ibatt_avg=%d vbatt_avg=%d rbatt=%d act_cap_mah=%d\n",
+				ibatt_avg, vbatt_avg, rbatt, act_cap_mah);
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_VFLOAT, &float_volt_uv);
+	if (rc < 0) {
+		pr_err("failed to get float_volt_uv rc=%d\n", rc);
+		return rc;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_TYPE, &charge_type);
+	if (rc < 0) {
+		pr_err("failed to get charge_type rc=%d\n", rc);
+		return rc;
+	}
+	/* estimated battery current at the CC to CV transition */
+	switch (ttf->mode) {
+	case TTF_MODE_NORMAL:
+		i_cc2cv = ibatt_avg * vbatt_avg /
+			max(MILLI_UNIT, float_volt_uv / MILLI_UNIT);
+		break;
+	case TTF_MODE_QNOVO:
+		i_cc2cv = min(
+			ttf->cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT,
+			ibatt_avg * vbatt_avg /
+			max(MILLI_UNIT, float_volt_uv / MILLI_UNIT));
+		break;
+	default:
+		pr_err("TTF mode %d is not supported\n", ttf->mode);
+		break;
+	}
+	pr_debug("TTF: i_cc2cv=%d\n", i_cc2cv);
+
+	/* if we are already in CV state then we can skip estimating CC */
+	if (charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		goto cv_estimate;
+
+	/* estimated SOC at the CC to CV transition */
+	soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV);
+	soc_cc2cv = 100 - soc_cc2cv;
+	pr_debug("TTF: soc_cc2cv=%d\n", soc_cc2cv);
+
+	switch (ttf->mode) {
+	case TTF_MODE_NORMAL:
+		if (soc_cc2cv - msoc <= 0)
+			goto cv_estimate;
+
+		divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100);
+		t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) *
+						HOURS_TO_SECONDS, divisor);
+		break;
+	case TTF_MODE_QNOVO:
+		soc_per_step = 100 / MAX_CC_STEPS;
+		for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) {
+			msoc_next_step = (i + 1) * soc_per_step;
+			if (i == msoc / soc_per_step)
+				msoc_this_step = msoc;
+			else
+				msoc_this_step = i * soc_per_step;
+
+			/* scale ibatt by 85% to account for discharge pulses */
+			ibatt_this_step = min(
+					ttf->cc_step.arr[i] / MILLI_UNIT,
+					ibatt_avg) * 85 / 100;
+			divisor = max(100, ibatt_this_step * 100);
+			t_predicted_this_step = div_s64((s64)act_cap_mah *
+					(msoc_next_step - msoc_this_step) *
+					HOURS_TO_SECONDS, divisor);
+			t_predicted += t_predicted_this_step;
+			pr_debug("TTF: [%d, %d] ma=%d t=%d\n",
+				msoc_this_step, msoc_next_step,
+				ibatt_this_step, t_predicted_this_step);
+		}
+		break;
+	default:
+		pr_err("TTF mode %d is not supported\n", ttf->mode);
+		break;
+	}
+
+cv_estimate:
+	pr_debug("TTF: t_predicted_cc=%d\n", t_predicted);
+
+	iterm = max(100, abs(iterm) + ttf->iterm_delta);
+	pr_debug("TTF: iterm=%d\n", iterm);
+
+	if (charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm);
+	else
+		tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm);
+
+	rc = ttf_lerp(ttf_ln_table, ARRAY_SIZE(ttf_ln_table), tau, &tau);
+	if (rc < 0) {
+		pr_err("failed to interpolate tau rc=%d\n", rc);
+		return rc;
+	}
+
+	/* tau is scaled linearly from 95% to 100% SOC */
+	if (msoc >= 95)
+		tau = tau * 2 * (100 - msoc) / 10;
+
+	pr_debug("TTF: tau=%d\n", tau);
+	t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau *
+						HOURS_TO_SECONDS, NANO_UNIT);
+	pr_debug("TTF: t_predicted_cv=%d\n", t_predicted_cv);
+	t_predicted += t_predicted_cv;
+
+	pr_debug("TTF: t_predicted_prefilter=%d\n", t_predicted);
+	if (ttf->last_ms != 0) {
+		delta_ms = ktime_ms_delta(ktime_get_boottime(),
+					  ms_to_ktime(ttf->last_ms));
+		if (delta_ms > 10000) {
+			ttf_slope = div64_s64(
+				((s64)t_predicted - ttf->last_ttf) *
+				MICRO_UNIT, delta_ms);
+			if (ttf_slope > -100)
+				ttf_slope = -100;
+			else if (ttf_slope < -2000)
+				ttf_slope = -2000;
+
+			t_predicted = div_s64(
+				(s64)ttf_slope * delta_ms, MICRO_UNIT) +
+				ttf->last_ttf;
+			pr_debug("TTF: ttf_slope=%d\n", ttf_slope);
+		} else {
+			t_predicted = ttf->last_ttf;
+		}
+	}
+
+	/* clamp the ttf to 0 */
+	if (t_predicted < 0)
+		t_predicted = 0;
+
+	pr_debug("TTF: t_predicted_postfilter=%d\n", t_predicted);
+	*val = t_predicted;
+	return 0;
+}
+
+/**
+ * ttf_get_time_to_full -
+ * @ttf: ttf object
+ * @val: Average time to full returned to the caller
+ *
+ * Get the average time to fully charge the battery based on the current
+ * SOC, rbatt, battery voltage, charge current, etc.
+ */
+int ttf_get_time_to_full(struct ttf *ttf, int *val)
+{
+	int rc;
+
+	mutex_lock(&ttf->lock);
+	rc = get_time_to_full_locked(ttf, val);
+	mutex_unlock(&ttf->lock);
+
+	return rc;
+}
+
+static void ttf_work(struct work_struct *work)
+{
+	struct ttf *ttf = container_of(work,
+				struct ttf, ttf_work.work);
+	int rc, ibatt_now, vbatt_now, ttf_now, charge_status;
+	ktime_t ktime_now;
+
+	mutex_lock(&ttf->lock);
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge_status rc=%d\n", rc);
+		goto end_work;
+	}
+	if (charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+			charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+		goto end_work;
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery current, rc=%d\n", rc);
+		goto end_work;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_VBAT, &vbatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		goto end_work;
+	}
+
+	ttf_circ_buf_add(&ttf->ibatt, ibatt_now);
+	ttf_circ_buf_add(&ttf->vbatt, vbatt_now);
+
+	if (charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		rc = get_time_to_full_locked(ttf, &ttf_now);
+		if (rc < 0) {
+			pr_err("failed to get ttf, rc=%d\n", rc);
+			goto end_work;
+		}
+
+		/* keep the wake lock and prime the IBATT and VBATT buffers */
+		if (ttf_now < 0) {
+			/* delay for one FG cycle */
+			schedule_delayed_work(&ttf->ttf_work,
+					msecs_to_jiffies(1000));
+			mutex_unlock(&ttf->lock);
+			return;
+		}
+
+		/* update the TTF reference point every minute */
+		ktime_now = ktime_get_boottime();
+		if (ktime_ms_delta(ktime_now,
+				   ms_to_ktime(ttf->last_ms)) > 60000 ||
+				   ttf->last_ms == 0) {
+			ttf->last_ttf = ttf_now;
+			ttf->last_ms = ktime_to_ms(ktime_now);
+		}
+	}
+
+	/* re-queue the work after period_ms (10 seconds by default) */
+	schedule_delayed_work(&ttf->ttf_work, msecs_to_jiffies(ttf->period_ms));
+end_work:
+	ttf->awake_voter(ttf->data, false);
+	mutex_unlock(&ttf->lock);
+}
+
+/**
+ * ttf_get_time_to_empty -
+ * @ttf: ttf object
+ * @val: Average time to empty returned to the caller
+ *
+ * Get the average time to empty the battery based on the current SOC
+ * and the average battery current.
+ */
+int ttf_get_time_to_empty(struct ttf *ttf, int *val)
+{
+	int rc, ibatt_avg, msoc, act_cap_mah, divisor, valid = 0,
+		charge_status = 0;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_VALID, &valid);
+	if (rc < 0) {
+		pr_err("failed to get ttf_valid rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!valid) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc =  ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
+	if (rc < 0) {
+		pr_err("failed to get charge-status rc=%d\n", rc);
+		return rc;
+	}
+
+	if (charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		*val = -EINVAL;
+		return 0;
+	}
+
+	rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_avg);
+		if (rc < 0) {
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	ibatt_avg /= MILLI_UNIT;
+	/* clamp ibatt_avg to 100mA */
+	if (ibatt_avg < 100)
+		ibatt_avg = 100;
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ttf->get_ttf_param(ttf->data, TTF_FCC, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+
+	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
+	divisor = ibatt_avg * divisor / 100;
+	divisor = max(100, divisor);
+	*val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
+
+	pr_debug("TTF: ibatt_avg=%d msoc=%d act_cap_mah=%d TTE=%d\n",
+			ibatt_avg, msoc, act_cap_mah, *val);
+
+	return 0;
+}
+
+/**
+ * ttf_update -
+ * @ttf: ttf object
+ * @input_present: Indicator for input presence
+ *
+ * Called by FG/QG driver when there is a state change (Charging status, SOC)
+ *
+ */
+void ttf_update(struct ttf *ttf, bool input_present)
+{
+	int delay_ms;
+
+	if (ttf->input_present == input_present)
+		return;
+
+	ttf->input_present = input_present;
+	if (input_present)
+		/* wait 35 seconds for the input to settle */
+		delay_ms = 35000;
+	else
+		/* wait 5 seconds for current to settle during discharge */
+		delay_ms = 5000;
+
+	ttf->awake_voter(ttf->data, true);
+	cancel_delayed_work_sync(&ttf->ttf_work);
+	mutex_lock(&ttf->lock);
+	ttf_circ_buf_clr(&ttf->ibatt);
+	ttf_circ_buf_clr(&ttf->vbatt);
+	ttf->last_ttf = 0;
+	ttf->last_ms = 0;
+	mutex_unlock(&ttf->lock);
+	schedule_delayed_work(&ttf->ttf_work, msecs_to_jiffies(delay_ms));
+}
+
+/**
+ * ttf_tte_init -
+ * @ttf: Time to full object
+ *
+ * The FG/QG driver must call this during probe, after allocating the ttf
+ * object, to validate the required parameters.
+ *
+ */
+int ttf_tte_init(struct ttf *ttf)
+{
+	if (!ttf)
+		return -ENODEV;
+
+	if (!ttf->awake_voter || !ttf->get_ttf_param) {
+		pr_err("Insufficient functions for supporting ttf\n");
+		return -EINVAL;
+	}
+
+	if (!ttf->iterm_delta)
+		ttf->iterm_delta = DEFAULT_TTF_ITERM_DELTA_MA;
+	if (!ttf->period_ms)
+		ttf->period_ms = DEFAULT_TTF_RUN_PERIOD_MS;
+
+	mutex_init(&ttf->lock);
+	INIT_DELAYED_WORK(&ttf->ttf_work, ttf_work);
+
+	return 0;
+}
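
The time-to-full estimate added above converts the current ratio at the CC-to-CV transition into a time constant by looking up a fixed-point natural-log table (ttf_ln_table holds 1000*ln(x/1000) for selected x) and linearly interpolating between entries, as ttf_lerp() does. Below is a standalone sketch of just that lookup step, assuming the same scale-by-1000 fixed-point convention; it is a simplified illustration, not the kernel code itself.

/* Piecewise-linear lookup over an (x, 1000*ln(x/1000)) table, mirroring
 * ttf_lerp()/ttf_ln_table above. Standalone userspace sketch.
 */
#include <stdio.h>

struct pt { long x, y; };

static const struct pt ln_table[] = {
	{ 1000, 0 }, { 2000, 693 }, { 4000, 1386 }, { 6000, 1792 },
	{ 8000, 2079 }, { 16000, 2773 }, { 32000, 3466 },
	{ 64000, 4159 }, { 128000, 4852 },
};

static long lerp(const struct pt *pts, int n, long in)
{
	int i;

	if (in <= pts[0].x)
		return pts[0].y;
	if (in >= pts[n - 1].x)
		return pts[n - 1].y;
	for (i = 1; i < n; i++) {
		if (in < pts[i].x)
			return pts[i - 1].y +
			       (pts[i].y - pts[i - 1].y) * (in - pts[i - 1].x) /
			       (pts[i].x - pts[i - 1].x);
	}
	return pts[n - 1].y;
}

int main(void)
{
	/* tau = 1000 * i_cc2cv / iterm; 3000 -> 1039 here vs. exact 1000*ln(3) ~= 1099 */
	long tau = lerp(ln_table, (int)(sizeof(ln_table) / sizeof(ln_table[0])), 3000);

	printf("interpolated ln value: %ld\n", tau);
	return 0;
}
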
diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h
index 7c25007..22e9c2b 100644
--- a/drivers/power/supply/qcom/fg-alg.h
+++ b/drivers/power/supply/qcom/fg-alg.h
@@ -15,6 +15,8 @@
 
 #define BUCKET_COUNT		8
 #define BUCKET_SOC_PCT		(256 / BUCKET_COUNT)
+#define MAX_CC_STEPS		20
+#define MAX_TTF_SAMPLES		10
 
 struct cycle_counter {
 	void		*data;
@@ -58,6 +60,58 @@
 	int (*prime_cc_soc)(void *data, u32 cc_soc_sw);
 };
 
+enum ttf_mode {
+	TTF_MODE_NORMAL = 0,
+	TTF_MODE_QNOVO,
+};
+
+enum ttf_param {
+	TTF_MSOC = 0,
+	TTF_VBAT,
+	TTF_IBAT,
+	TTF_FCC,
+	TTF_MODE,
+	TTF_ITERM,
+	TTF_RBATT,
+	TTF_VFLOAT,
+	TTF_CHG_TYPE,
+	TTF_CHG_STATUS,
+	TTF_VALID,
+};
+
+struct ttf_circ_buf {
+	int	arr[MAX_TTF_SAMPLES];
+	int	size;
+	int	head;
+};
+
+struct ttf_cc_step_data {
+	int arr[MAX_CC_STEPS];
+	int sel;
+};
+
+struct ttf_pt {
+	s32 x;
+	s32 y;
+};
+
+struct ttf {
+	void			*data;
+	struct ttf_circ_buf	ibatt;
+	struct ttf_circ_buf	vbatt;
+	struct ttf_cc_step_data	cc_step;
+	struct mutex		lock;
+	int			mode;
+	int			last_ttf;
+	int			input_present;
+	int			iterm_delta;
+	int			period_ms;
+	s64			last_ms;
+	struct delayed_work	ttf_work;
+	int (*get_ttf_param)(void *data, enum ttf_param, int *val);
+	int (*awake_voter)(void *data, bool vote);
+};
+
 int restore_cycle_count(struct cycle_counter *counter);
 void clear_cycle_count(struct cycle_counter *counter);
 void cycle_count_update(struct cycle_counter *counter, int batt_soc,
@@ -72,5 +126,9 @@
 int cap_learning_init(struct cap_learning *cl);
 int cap_learning_post_profile_init(struct cap_learning *cl,
 		int64_t nom_cap_uah);
+void ttf_update(struct ttf *ttf, bool input_present);
+int ttf_get_time_to_empty(struct ttf *ttf, int *val);
+int ttf_get_time_to_full(struct ttf *ttf, int *val);
+int ttf_tte_init(struct ttf *ttf);
 
 #endif
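
For orientation, here is a hedged sketch of how a gauge driver might wire up the struct ttf declared above. Only the struct ttf fields and the ttf_* entry points come from this header; the chip structure, callback bodies, and setup function are hypothetical.

/* Hypothetical client wiring for struct ttf; not taken from any real driver. */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "fg-alg.h"

struct my_chip {
	struct ttf *ttf;
	/* ... gauge state ... */
};

static int my_get_ttf_param(void *data, enum ttf_param param, int *val)
{
	/* return MSOC, IBAT, VBAT, FCC, ... from hardware/SRAM as requested */
	*val = 0;
	return 0;
}

static int my_awake_voter(void *data, bool vote)
{
	/* hold or release a wakeup source while ttf_work() is sampling */
	return 0;
}

static int my_ttf_setup(struct my_chip *chip)
{
	chip->ttf = kzalloc(sizeof(*chip->ttf), GFP_KERNEL);
	if (!chip->ttf)
		return -ENOMEM;

	chip->ttf->data = chip;
	chip->ttf->get_ttf_param = my_get_ttf_param;
	chip->ttf->awake_voter = my_awake_voter;
	/* iterm_delta and period_ms fall back to defaults inside ttf_tte_init() */

	return ttf_tte_init(chip->ttf);
}

With this in place, ttf_update() would be invoked on charger insertion/removal, and the time-to-full/empty power-supply properties would be served from ttf_get_time_to_full() and ttf_get_time_to_empty().
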
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 36edd76..00a4533 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -398,10 +398,8 @@
 {
 	u8 table_index = charging ? TABLE_SOC_OCV1 : TABLE_SOC_OCV2;
 
-	if (!the_battery || !the_battery->profile) {
-		pr_err("Battery profile not loaded\n");
+	if (!the_battery || !the_battery->profile)
 		return -ENODEV;
-	}
 
 	*soc = interpolate_soc(&the_battery->profile[table_index],
 				batt_temp, UV_TO_DECIUV(ocv_uv));
@@ -416,10 +414,8 @@
 	u8 table_index = charging ? TABLE_FCC1 : TABLE_FCC2;
 	u32 fcc_mah;
 
-	if (!the_battery || !the_battery->profile) {
-		pr_err("Battery profile not loaded\n");
+	if (!the_battery || !the_battery->profile)
 		return -ENODEV;
-	}
 
 	fcc_mah = interpolate_single_row_lut(
 				&the_battery->profile[table_index],
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index a1aeac2..f21b2a8 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include "fg-alg.h"
+#include "qg-defs.h"
 
 struct qg_batt_props {
 	const char		*batt_type_str;
@@ -50,10 +51,25 @@
 	int			rbat_conn_mohm;
 	int			ignore_shutdown_soc_secs;
 	int			cold_temp_threshold;
+	int			esr_qual_i_ua;
+	int			esr_qual_v_uv;
+	int			esr_disable_soc;
 	bool			hold_soc_while_full;
 	bool			linearize_soc;
 	bool			cl_disable;
 	bool			cl_feedback_on;
+	bool			esr_disable;
+	bool			esr_discharge_enable;
+	bool			qg_ext_sense;
+};
+
+struct qg_esr_data {
+	u32			pre_esr_v;
+	u32			pre_esr_i;
+	u32			post_esr_v;
+	u32			post_esr_i;
+	u32			esr;
+	bool			valid;
 };
 
 struct qpnp_qg {
@@ -87,6 +103,7 @@
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
 	struct power_supply	*parallel_psy;
+	struct qg_esr_data	esr_data[QG_MAX_ESR_COUNT];
 
 	/* status variable */
 	u32			*debug_mask;
@@ -102,10 +119,18 @@
 	bool			charge_full;
 	int			charge_status;
 	int			charge_type;
+	int			chg_iterm_ma;
 	int			next_wakeup_ms;
+	int			esr_actual;
+	int			esr_nominal;
+	int			soh;
+	int			soc_reporting_ready;
+	u32			fifo_done_count;
 	u32			wa_flags;
 	u32			seq_no;
 	u32			charge_counter_uah;
+	u32			esr_avg;
+	u32			esr_last;
 	ktime_t			last_user_update_time;
 	ktime_t			last_fifo_update_time;
 
@@ -116,6 +141,7 @@
 	int			pon_soc;
 	int			batt_soc;
 	int			cc_soc;
+	int			full_soc;
 	struct alarm		alarm_timer;
 	u32			sdam_data[SDAM_MAX];
 
@@ -126,6 +152,14 @@
 	struct cap_learning	*cl;
 	/* charge counter */
 	struct cycle_counter	*counter;
+	/* ttf */
+	struct ttf		*ttf;
+};
+
+struct ocv_all {
+	u32 ocv_uv;
+	u32 ocv_raw;
+	char ocv_type[20];
 };
 
 enum ocv_type {
@@ -133,6 +167,7 @@
 	S3_GOOD_OCV,
 	S3_LAST_OCV,
 	SDAM_PON_OCV,
+	PON_OCV_MAX,
 };
 
 enum debug_mask {
@@ -147,6 +182,7 @@
 	QG_DEBUG_BUS_READ	= BIT(8),
 	QG_DEBUG_BUS_WRITE	= BIT(9),
 	QG_DEBUG_ALG_CL		= BIT(10),
+	QG_DEBUG_ESR		= BIT(11),
 };
 
 enum qg_irq {
diff --git a/drivers/power/supply/qcom/qg-defs.h b/drivers/power/supply/qcom/qg-defs.h
index 2061208..997ff70 100644
--- a/drivers/power/supply/qcom/qg-defs.h
+++ b/drivers/power/supply/qcom/qg-defs.h
@@ -34,6 +34,7 @@
 #define GOOD_OCV_VOTER			"GOOD_OCV_VOTER"
 #define PROFILE_IRQ_DISABLE		"NO_PROFILE_IRQ_DISABLE"
 #define QG_INIT_STATE_IRQ_DISABLE	"QG_INIT_STATE_IRQ_DISABLE"
+#define TTF_AWAKE_VOTER			"TTF_AWAKE_VOTER"
 
 #define V_RAW_TO_UV(V_RAW)		div_u64(194637ULL * (u64)V_RAW, 1000)
 #define I_RAW_TO_UA(I_RAW)		div_s64(152588LL * (s64)I_RAW, 1000)
@@ -44,6 +45,9 @@
 #define UV_TO_DECIUV(a)			(a / 100)
 #define DECIUV_TO_UV(a)			(a * 100)
 
+#define QG_MAX_ESR_COUNT		10
+#define QG_MIN_ESR_COUNT		2
+
 #define CAP(min, max, value)			\
 		((min > value) ? min : ((value > max) ? max : value))
 
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 66f9be1..e0f400d 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -17,7 +17,9 @@
 #define QG_TYPE					0x0D
 
 #define QG_STATUS1_REG				0x08
+#define QG_OK_BIT				BIT(7)
 #define BATTERY_PRESENT_BIT			BIT(0)
+#define ESR_MEAS_DONE_BIT			BIT(4)
 
 #define QG_STATUS2_REG				0x09
 #define GOOD_OCV_BIT				BIT(1)
@@ -25,9 +27,13 @@
 #define QG_STATUS3_REG				0x0A
 #define COUNT_FIFO_RT_MASK			GENMASK(3, 0)
 
+#define QG_STATUS4_REG				0x0B
+#define ESR_MEAS_IN_PROGRESS_BIT		BIT(4)
+
 #define QG_INT_RT_STS_REG			0x10
 #define FIFO_UPDATE_DONE_RT_STS_BIT		BIT(3)
 #define VBAT_LOW_INT_RT_STS_BIT			BIT(1)
+#define BATTERY_MISSING_INT_RT_STS_BIT		BIT(0)
 
 #define QG_INT_LATCHED_STS_REG			0x18
 #define FIFO_UPDATE_DONE_INT_LAT_STS_BIT	BIT(3)
@@ -60,11 +66,25 @@
 #define QG_S3_ENTRY_IBAT_THRESHOLD_REG		0x5E
 #define QG_S3_EXIT_IBAT_THRESHOLD_REG		0x5F
 
+#define QG_S5_OCV_VALIDATE_MEAS_CTL1_REG	0x60
+#define ALLOW_S5_BIT				BIT(7)
+
+#define QG_S7_PON_OCV_MEAS_CTL1_REG		0x64
+#define ADC_CONV_DLY_MASK			GENMASK(3, 0)
+
+#define QG_ESR_MEAS_TRIG_REG			0x68
+#define HW_ESR_MEAS_START_BIT			BIT(0)
+
 #define QG_S7_PON_OCV_V_DATA0_REG		0x70
 #define QG_S7_PON_OCV_I_DATA0_REG		0x72
 #define QG_S3_GOOD_OCV_V_DATA0_REG		0x74
 #define QG_S3_GOOD_OCV_I_DATA0_REG		0x76
 
+#define QG_PRE_ESR_V_DATA0_REG			0x78
+#define QG_PRE_ESR_I_DATA0_REG			0x7A
+#define QG_POST_ESR_V_DATA0_REG			0x7C
+#define QG_POST_ESR_I_DATA0_REG			0x7E
+
 #define QG_V_ACCUM_DATA0_RT_REG			0x88
 #define QG_I_ACCUM_DATA0_RT_REG			0x8B
 #define QG_ACCUM_CNT_RT_REG			0x8E
@@ -80,15 +100,22 @@
 #define QG_LAST_S3_SLEEP_V_DATA0_REG		0xCC
 
 /* SDAM offsets */
-#define QG_SDAM_VALID_OFFSET			0x46
-#define QG_SDAM_SOC_OFFSET			0x47
-#define QG_SDAM_TEMP_OFFSET			0x48
-#define QG_SDAM_RBAT_OFFSET			0x4A
-#define QG_SDAM_OCV_OFFSET			0x4C
-#define QG_SDAM_IBAT_OFFSET			0x50
-#define QG_SDAM_TIME_OFFSET			0x54
-#define QG_SDAM_CYCLE_COUNT_OFFSET		0x58
-#define QG_SDAM_LEARNED_CAPACITY_OFFSET		0x68
-#define QG_SDAM_PON_OCV_OFFSET			0x7C
+#define QG_SDAM_VALID_OFFSET			0x46 /* 1-byte 0x46 */
+#define QG_SDAM_SOC_OFFSET			0x47 /* 1-byte 0x47 */
+#define QG_SDAM_TEMP_OFFSET			0x48 /* 2-byte 0x48-0x49 */
+#define QG_SDAM_RBAT_OFFSET			0x4A /* 2-byte 0x4A-0x4B */
+#define QG_SDAM_OCV_OFFSET			0x4C /* 4-byte 0x4C-0x4F */
+#define QG_SDAM_IBAT_OFFSET			0x50 /* 4-byte 0x50-0x53 */
+#define QG_SDAM_TIME_OFFSET			0x54 /* 4-byte 0x54-0x57 */
+#define QG_SDAM_CYCLE_COUNT_OFFSET		0x58 /* 16-byte 0x58-0x67 */
+#define QG_SDAM_LEARNED_CAPACITY_OFFSET		0x68 /* 2-byte 0x68-0x69 */
+#define QG_SDAM_ESR_CHARGE_DELTA_OFFSET		0x6A /* 4-byte 0x6A-0x6D */
+#define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET	0x6E /* 4-byte 0x6E-0x71 */
+#define QG_SDAM_ESR_CHARGE_SF_OFFSET		0x72 /* 2-byte 0x72-0x73 */
+#define QG_SDAM_ESR_DISCHARGE_SF_OFFSET		0x74 /* 2-byte 0x74-0x75 */
+#define QG_SDAM_MAX_OFFSET			0xA4
+
+/* Below offset is used by PBS */
+#define QG_SDAM_PON_OCV_OFFSET			0xBC /* 2-byte 0xBC-0xBD */
 
 #endif
diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c
index 7bc4afa..a7cb97e 100644
--- a/drivers/power/supply/qcom/qg-sdam.c
+++ b/drivers/power/supply/qcom/qg-sdam.c
@@ -68,6 +68,26 @@
 		.offset = QG_SDAM_PON_OCV_OFFSET,
 		.length = 2,
 	},
+	[SDAM_ESR_CHARGE_DELTA] = {
+		.name	= "SDAM_ESR_CHARGE_DELTA",
+		.offset = QG_SDAM_ESR_CHARGE_DELTA_OFFSET,
+		.length = 4,
+	},
+	[SDAM_ESR_DISCHARGE_DELTA] = {
+		.name	= "SDAM_ESR_DISCHARGE_DELTA",
+		.offset = QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET,
+		.length = 4,
+	},
+	[SDAM_ESR_CHARGE_SF] = {
+		.name	= "SDAM_ESR_CHARGE_SF_OFFSET",
+		.offset = QG_SDAM_ESR_CHARGE_SF_OFFSET,
+		.length = 2,
+	},
+	[SDAM_ESR_DISCHARGE_SF] = {
+		.name	= "SDAM_ESR_DISCHARGE_SF_OFFSET",
+		.offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET,
+		.length = 2,
+	},
 };
 
 int qg_sdam_write(u8 param, u32 data)
@@ -91,7 +111,7 @@
 	length = sdam_info[param].length;
 	rc = regmap_bulk_write(chip->regmap, offset, (u8 *)&data, length);
 	if (rc < 0)
-		pr_err("Failed to write offset=%0x4x param=%d value=%d\n",
+		pr_err("Failed to write offset=%0x4 param=%d value=%d\n",
 					offset, param, data);
 	else
 		pr_debug("QG SDAM write param=%s value=%d\n",
@@ -117,11 +137,12 @@
 		return -EINVAL;
 	}
 
+	*data = 0;
 	offset = chip->sdam_base + sdam_info[param].offset;
 	length = sdam_info[param].length;
 	rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, length);
 	if (rc < 0)
-		pr_err("Failed to read offset=%0x4x param=%d\n",
+		pr_err("Failed to read offset=%0x4 param=%d\n",
 					offset, param);
 	else
 		pr_debug("QG SDAM read param=%s value=%d\n",
@@ -143,11 +164,11 @@
 	offset = chip->sdam_base + offset;
 	rc = regmap_bulk_write(chip->regmap, offset, data, (size_t)length);
 	if (rc < 0) {
-		pr_err("Failed to write offset=%0x4x value=%d\n",
+		pr_err("Failed to write offset=%0x4 value=%d\n",
 					offset, *data);
 	} else {
 		for (i = 0; i < length; i++)
-			pr_debug("QG SDAM write offset=%0x4x value=%d\n",
+			pr_debug("QG SDAM write offset=%0x4 value=%d\n",
 					offset++, data[i]);
 	}
 
@@ -167,10 +188,10 @@
 	offset = chip->sdam_base + offset;
 	rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, (size_t)length);
 	if (rc < 0) {
-		pr_err("Failed to read offset=%0x4x\n", offset);
+		pr_err("Failed to read offset=%0x4\n", offset);
 	} else {
 		for (i = 0; i < length; i++)
-			pr_debug("QG SDAM read offset=%0x4x value=%d\n",
+			pr_debug("QG SDAM read offset=%0x4 value=%d\n",
 					offset++, data[i]);
 	}
 
diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h
index 10e684f..45218a8 100644
--- a/drivers/power/supply/qcom/qg-sdam.h
+++ b/drivers/power/supply/qcom/qg-sdam.h
@@ -24,6 +24,10 @@
 	SDAM_IBAT_UA,
 	SDAM_TIME_SEC,
 	SDAM_PON_OCV_UV,
+	SDAM_ESR_CHARGE_DELTA,
+	SDAM_ESR_DISCHARGE_DELTA,
+	SDAM_ESR_CHARGE_SF,
+	SDAM_ESR_DISCHARGE_SF,
 	SDAM_MAX,
 };
 
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index d354799..824d914 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -111,6 +111,22 @@
 	return rc;
 }
 
+int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data)
+{
+	int rc;
+	u8 reg[2] = {0};
+
+	rc = qg_read(chip, chip->qg_base + addr, &reg[0], 2);
+	if (rc < 0) {
+		pr_err("Failed to read QG addr %d rc=%d\n", addr, rc);
+		return rc;
+	}
+
+	*data = reg[0] | (reg[1] << 8);
+
+	return rc;
+}
+
 int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt)
 {
 	int rc;
diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h
index 385c9e0..bb17afb 100644
--- a/drivers/power/supply/qcom/qg-util.h
+++ b/drivers/power/supply/qcom/qg-util.h
@@ -15,6 +15,7 @@
 int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len);
 int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len);
 int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val);
+int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data);
 int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt);
 int get_sample_count(struct qpnp_qg *chip, u32 *sample_count);
 int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval);
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index 015da41..deccb20 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -33,6 +33,7 @@
 #include <linux/ktime.h>
 #include <linux/power_supply.h>
 #include <linux/of_batterydata.h>
+#include <linux/spinlock.h>
 #include <linux/string_helpers.h>
 #include <linux/alarmtimer.h>
 #include <linux/qpnp/qpnp-revid.h>
@@ -72,6 +73,7 @@
 
 #define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
 #define MEM_IF_TIMEOUT_MS	5000
+#define FG_CYCLE_MS		1500
 #define BUCKET_COUNT		8
 #define BUCKET_SOC_PCT		(256 / BUCKET_COUNT)
 
@@ -108,6 +110,7 @@
 	PMI8950		= 17,
 	PMI8996		= 19,
 	PMI8937		= 55,
+	PMI8940		= 64,
 };
 
 enum wa_flags {
@@ -150,6 +153,8 @@
 	int			min_temp;
 	int			max_temp;
 	int			vbat_est_thr_uv;
+	int			max_cap_limit;
+	int			min_cap_limit;
 };
 
 struct fg_rslow_data {
@@ -275,11 +280,45 @@
 	DATA(BATT_ID_INFO,    0x594,   3,      1,     -EINVAL),
 };
 
+enum fg_mem_backup_index {
+	FG_BACKUP_SOC = 0,
+	FG_BACKUP_CYCLE_COUNT,
+	FG_BACKUP_CC_SOC_COEFF,
+	FG_BACKUP_IGAIN,
+	FG_BACKUP_VCOR,
+	FG_BACKUP_TEMP_COUNTER,
+	FG_BACKUP_AGING_STORAGE,
+	FG_BACKUP_MAH_TO_SOC,
+	FG_BACKUP_MAX,
+};
+
+#define BACKUP(_idx, _address, _offset, _length,  _value)	\
+	[FG_BACKUP_##_idx] = {				\
+		.address = _address,			\
+		.offset = _offset,			\
+		.len = _length,			\
+		.value = _value,			\
+	}						\
+
+static struct fg_mem_data fg_backup_regs[FG_BACKUP_MAX] = {
+	/*       ID           Address, Offset, Length, Value*/
+	BACKUP(SOC,		0x564,   0,      24,     -EINVAL),
+	BACKUP(CYCLE_COUNT,	0x5E8,   0,      16,     -EINVAL),
+	BACKUP(CC_SOC_COEFF,	0x5BC,   0,      8,     -EINVAL),
+	BACKUP(IGAIN,		0x424,   0,      4,     -EINVAL),
+	BACKUP(VCOR,		0x484,   0,      4,     -EINVAL),
+	BACKUP(TEMP_COUNTER,	0x580,   0,      4,     -EINVAL),
+	BACKUP(AGING_STORAGE,	0x5E4,   0,      4,     -EINVAL),
+	BACKUP(MAH_TO_SOC,	0x4A0,   0,      4,     -EINVAL),
+};
+
 static int fg_debug_mask;
 module_param_named(
 	debug_mask, fg_debug_mask, int, 00600
 );
 
+static int fg_reset_on_lockup;
+
 static int fg_sense_type = -EINVAL;
 static int fg_restart;
 
@@ -298,9 +337,18 @@
 	sram_update_period_ms, fg_sram_update_period_ms, int, 00600
 );
 
+static bool fg_batt_valid_ocv;
+module_param_named(batt_valid_ocv, fg_batt_valid_ocv, bool, 0600
+);
+
+static int fg_batt_range_pct;
+module_param_named(batt_range_pct, fg_batt_range_pct, int, 0600
+);
+
 struct fg_irq {
 	int			irq;
-	unsigned long		disabled;
+	bool			disabled;
+	bool			wakeup;
 };
 
 enum fg_soc_irq {
@@ -348,6 +396,16 @@
 	MAX_ADDRESS,
 };
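+/*
+ * Indices into chip->batt_info[]. Userspace selects an entry through
+ * POWER_SUPPLY_PROP_BATTERY_INFO_ID and then reads or writes its value
+ * through POWER_SUPPLY_PROP_BATTERY_INFO.
+ */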
 
+enum batt_info_params {
+	BATT_INFO_NOTIFY = 0,
+	BATT_INFO_SOC,
+	BATT_INFO_RES_ID,
+	BATT_INFO_VOLTAGE,
+	BATT_INFO_TEMP,
+	BATT_INFO_FCC,
+	BATT_INFO_MAX,
+};
+
 struct register_offset {
 	u16 address[MAX_ADDRESS];
 };
@@ -395,6 +453,22 @@
 	}
 }
 
+enum slope_limit_status {
+	LOW_TEMP_CHARGE,
+	HIGH_TEMP_CHARGE,
+	LOW_TEMP_DISCHARGE,
+	HIGH_TEMP_DISCHARGE,
+	SLOPE_LIMIT_MAX,
+};
+
+#define VOLT_GAIN_MAX		3
+struct dischg_gain_soc {
+	bool			enable;
+	u32			soc[VOLT_GAIN_MAX];
+	u32			medc_gain[VOLT_GAIN_MAX];
+	u32			highc_gain[VOLT_GAIN_MAX];
+};
+
 #define THERMAL_COEFF_N_BYTES		6
 struct fg_chip {
 	struct device		*dev;
@@ -420,6 +494,7 @@
 	struct completion	first_soc_done;
 	struct power_supply	*bms_psy;
 	struct power_supply_desc	bms_psy_d;
+	spinlock_t		sec_access_lock;
 	struct mutex		rw_lock;
 	struct mutex		sysfs_restart_lock;
 	struct delayed_work	batt_profile_init;
@@ -449,6 +524,7 @@
 	struct fg_wakeup_source	update_sram_wakeup_source;
 	bool			fg_restarting;
 	bool			profile_loaded;
+	bool			soc_reporting_ready;
 	bool			use_otp_profile;
 	bool			battery_missing;
 	bool			power_supply_registered;
@@ -459,6 +535,7 @@
 	bool			charge_done;
 	bool			resume_soc_lowered;
 	bool			vbat_low_irq_enabled;
+	bool			full_soc_irq_enabled;
 	bool			charge_full;
 	bool			hold_soc_while_full;
 	bool			input_present;
@@ -467,6 +544,10 @@
 	bool			bad_batt_detection_en;
 	bool			bcl_lpm_disabled;
 	bool			charging_disabled;
+	bool			use_vbat_low_empty_soc;
+	bool			fg_shutdown;
+	bool			use_soft_jeita_irq;
+	bool			allow_false_negative_isense;
 	struct delayed_work	update_jeita_setting;
 	struct delayed_work	update_sram_data;
 	struct delayed_work	update_temp_work;
@@ -491,6 +572,7 @@
 	int			prev_status;
 	int			health;
 	enum fg_batt_aging_mode	batt_aging_mode;
+	struct alarm		hard_jeita_alarm;
 	/* capacity learning */
 	struct fg_learning_data	learning_data;
 	struct alarm		fg_cap_learning_alarm;
@@ -498,6 +580,7 @@
 	struct fg_cc_soc_data	sw_cc_soc_data;
 	/* rslow compensation */
 	struct fg_rslow_data	rslow_comp;
+	int			rconn_mohm;
 	/* cycle counter */
 	struct fg_cyc_ctr_data	cyc_ctr;
 	/* iadc compensation */
@@ -510,6 +593,8 @@
 	bool			jeita_hysteresis_support;
 	bool			batt_hot;
 	bool			batt_cold;
+	bool			batt_warm;
+	bool			batt_cool;
 	int			cold_hysteresis;
 	int			hot_hysteresis;
 	/* ESR pulse tuning */
@@ -518,6 +603,47 @@
 	bool			esr_extract_disabled;
 	bool			imptr_pulse_slow_en;
 	bool			esr_pulse_tune_en;
+	/* Slope limiter */
+	struct work_struct	slope_limiter_work;
+	struct fg_wakeup_source	slope_limit_wakeup_source;
+	bool			soc_slope_limiter_en;
+	enum slope_limit_status	slope_limit_sts;
+	u32			slope_limit_temp;
+	u32			slope_limit_coeffs[SLOPE_LIMIT_MAX];
+	/* Discharge soc gain */
+	struct work_struct	dischg_gain_work;
+	struct fg_wakeup_source	dischg_gain_wakeup_source;
+	struct dischg_gain_soc	dischg_gain;
+	/* IMA error recovery */
+	struct completion	fg_reset_done;
+	struct work_struct	ima_error_recovery_work;
+	struct fg_wakeup_source	fg_reset_wakeup_source;
+	struct mutex		ima_recovery_lock;
+	bool			ima_error_handling;
+	bool			block_sram_access;
+	bool			irqs_enabled;
+	bool			use_last_soc;
+	int			last_soc;
+	/* Validating temperature */
+	int			last_good_temp;
+	int			batt_temp_low_limit;
+	int			batt_temp_high_limit;
+	/* Validating CC_SOC */
+	struct work_struct	cc_soc_store_work;
+	struct fg_wakeup_source	cc_soc_wakeup_source;
+	int			cc_soc_limit_pct;
+	bool			use_last_cc_soc;
+	int64_t			last_cc_soc;
+	/* Sanity check */
+	struct delayed_work	check_sanity_work;
+	struct fg_wakeup_source	sanity_wakeup_source;
+	u8			last_beat_count;
+	/* Batt_info restore */
+	int			batt_info[BATT_INFO_MAX];
+	int			batt_info_id;
+	bool			batt_info_restore;
+	bool			*batt_range_ocv;
+	int			*batt_range_pct;
 };
 
 /* FG_MEMIF DEBUGFS structures */
@@ -661,17 +787,56 @@
 	return rc;
 }
 
-static int fg_masked_write(struct fg_chip *chip, u16 addr,
+static int fg_masked_write_raw(struct fg_chip *chip, u16 addr,
 		u8 mask, u8 val, int len)
 {
 	int rc;
 
 	rc = regmap_update_bits(chip->regmap, addr, mask, val);
-	if (rc) {
+	if (rc)
 		pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
-		return rc;
+
+	return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+		u8 mask, u8 val, int len)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->sec_access_lock, flags);
+	rc = fg_masked_write_raw(chip, addr, mask, val, len);
+	spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+	return rc;
+}
+
+#define SEC_ACCESS_OFFSET	0xD0
+#define SEC_ACCESS_VALUE	0xA5
+#define PERIPHERAL_MASK		0xFF
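+/*
+ * Write to a register that sits behind the peripheral's sec_access lock:
+ * unlock it by writing SEC_ACCESS_VALUE to <peripheral base> + 0xD0 and then
+ * issue the masked write. Both steps are done under sec_access_lock so the
+ * unlock cannot be consumed by a concurrent writer.
+ */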
+static int fg_sec_masked_write(struct fg_chip *chip, u16 addr, u8 mask, u8 val,
+		int len)
+{
+	int rc;
+	unsigned long flags;
+	u8 temp;
+	u16 base = addr & (~PERIPHERAL_MASK);
+
+	spin_lock_irqsave(&chip->sec_access_lock, flags);
+	temp = SEC_ACCESS_VALUE;
+	rc = fg_write(chip, &temp, base + SEC_ACCESS_OFFSET, 1);
+	if (rc) {
+		pr_err("Unable to unlock sec_access: %d\n", rc);
+		goto out;
 	}
 
+	rc = fg_masked_write_raw(chip, addr, mask, val, len);
+	if (rc)
+		pr_err("Unable to write securely to address 0x%x: %d", addr,
+			rc);
+out:
+	spin_unlock_irqrestore(&chip->sec_access_lock, flags);
 	return rc;
 }
 
@@ -952,6 +1117,7 @@
 	int rc = 0, user_cnt = 0, sublen;
 	bool access_configured = false;
 	u8 *wr_data = val, word[4];
+	u16 orig_address = address;
 	char str[DEBUG_PRINT_BUFFER_SIZE];
 
 	if (address < RAM_OFFSET)
@@ -960,8 +1126,8 @@
 	if (offset > 3)
 		return -EINVAL;
 
-	address = ((address + offset) / 4) * 4;
-	offset = (address + offset) % 4;
+	address = ((orig_address + offset) / 4) * 4;
+	offset = (orig_address + offset) % 4;
 
 	user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
 	if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
@@ -1061,50 +1227,253 @@
 #define MEM_INTF_IMA_EXP_STS		0x55
 #define MEM_INTF_IMA_HW_STS		0x56
 #define MEM_INTF_IMA_BYTE_EN		0x60
-#define IMA_ADDR_STBL_ERR		BIT(7)
-#define IMA_WR_ACS_ERR			BIT(6)
-#define IMA_RD_ACS_ERR			BIT(5)
 #define IMA_IACS_CLR			BIT(2)
 #define IMA_IACS_RDY			BIT(1)
-static int fg_check_ima_exception(struct fg_chip *chip)
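+/*
+ * IACS clear sequence: set IMA_IACS_CLR in IMA_CFG, write 0x4 to the address
+ * MSB register, write 0x0 to WR_DATA3, read back RD_DATA3 and then clear
+ * IMA_IACS_CLR again. This recovers the IMA interface after an exception.
+ */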
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 temp;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Running IACS clear sequence\n");
+
+	/* clear the error */
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+				IMA_IACS_CLR, IMA_IACS_CLR, 1);
+	if (rc) {
+		pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+		return rc;
+	}
+
+	temp = 0x4;
+	rc = fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+	if (rc) {
+		pr_err("Error writing to MEM_INTF_ADDR_MSB, rc=%d\n", rc);
+		return rc;
+	}
+
+	temp = 0x0;
+	rc = fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+	if (rc) {
+		pr_err("Error writing to WR_DATA3, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+	if (rc) {
+		pr_err("Error writing to RD_DATA3, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+				IMA_IACS_CLR, 0, 1);
+	if (rc) {
+		pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("IACS clear sequence complete!\n");
+	return rc;
+}
+
+#define IACS_ERR_BIT		BIT(0)
+#define XCT_ERR_BIT		BIT(1)
+#define DATA_RD_ERR_BIT		BIT(3)
+#define DATA_WR_ERR_BIT		BIT(4)
+#define ADDR_BURST_WRAP_BIT	BIT(5)
+#define ADDR_RNG_ERR_BIT	BIT(6)
+#define ADDR_SRC_ERR_BIT	BIT(7)
+static int fg_check_ima_exception(struct fg_chip *chip, bool check_hw_sts)
 {
 	int rc = 0, ret = 0;
-	u8 err_sts, exp_sts = 0, hw_sts = 0;
+	u8 err_sts = 0, exp_sts = 0, hw_sts = 0;
+	bool run_err_clr_seq = false;
 
 	rc = fg_read(chip, &err_sts,
 			chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
 	if (rc) {
-		pr_err("failed to read beat count rc=%d\n", rc);
+		pr_err("failed to read IMA_ERR_STS, rc=%d\n", rc);
 		return rc;
 	}
 
-	if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
-		u8 temp;
-
-		fg_read(chip, &exp_sts,
+	rc = fg_read(chip, &exp_sts,
 			chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
-		fg_read(chip, &hw_sts,
-			chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
-		pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
-				err_sts, exp_sts, hw_sts);
-		rc = err_sts;
-
-		/* clear the error */
-		ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
-					IMA_IACS_CLR, IMA_IACS_CLR, 1);
-		temp = 0x4;
-		ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
-		temp = 0x0;
-		ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
-		ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
-		ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
-					IMA_IACS_CLR, 0, 1);
-		if (!ret)
-			return -EAGAIN;
-
-		pr_err("Error clearing IMA exception ret=%d\n", ret);
+	if (rc) {
+		pr_err("Error in reading IMA_EXP_STS, rc=%d\n", rc);
+		return rc;
 	}
 
+	rc = fg_read(chip, &hw_sts,
+			chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+	if (rc) {
+		pr_err("Error in reading IMA_HW_STS, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_info_once("Initial ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+		err_sts, exp_sts, hw_sts);
+
+	if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+		pr_info("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+			err_sts, exp_sts, hw_sts);
+
+	if (check_hw_sts) {
+		/*
+		 * Lower nibble should be equal to upper nibble before SRAM
+		 * transactions begins from SW side. If they are unequal, then
+		 * the error clear sequence should be run irrespective of IMA
+		 * exception errors.
+		 */
+		if ((hw_sts & 0x0F) != hw_sts >> 4) {
+			pr_err("IMA HW not in correct state, hw_sts=%x\n",
+				hw_sts);
+			run_err_clr_seq = true;
+		}
+	}
+
+	if (exp_sts & (IACS_ERR_BIT | XCT_ERR_BIT | DATA_RD_ERR_BIT |
+		DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_RNG_ERR_BIT |
+		ADDR_SRC_ERR_BIT)) {
+		pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+		run_err_clr_seq = true;
+	}
+
+	if (run_err_clr_seq) {
+		ret = fg_run_iacs_clear_sequence(chip);
+		if (!ret)
+			return -EAGAIN;
+		else
+			pr_err("Error clearing IMA exception ret=%d\n", ret);
+	}
+
+	return rc;
+}
+
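+/*
+ * Enable or disable the FG interrupts as a group. chip->irqs_enabled tracks
+ * the current state so that repeated calls with the same value are no-ops
+ * and the enable/disable calls stay balanced.
+ */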
+static void fg_enable_irqs(struct fg_chip *chip, bool enable)
+{
+	if (!(enable ^ chip->irqs_enabled))
+		return;
+
+	if (enable) {
+		enable_irq(chip->soc_irq[DELTA_SOC].irq);
+		enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+		if (!chip->full_soc_irq_enabled) {
+			enable_irq(chip->soc_irq[FULL_SOC].irq);
+			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = true;
+		}
+		enable_irq(chip->batt_irq[BATT_MISSING].irq);
+		if (!chip->vbat_low_irq_enabled) {
+			enable_irq(chip->batt_irq[VBATT_LOW].irq);
+			enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = true;
+		}
+		if (!chip->use_vbat_low_empty_soc) {
+			enable_irq(chip->soc_irq[EMPTY_SOC].irq);
+			enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+		}
+		chip->irqs_enabled = true;
+	} else {
+		disable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+		disable_irq_nosync(chip->soc_irq[DELTA_SOC].irq);
+		if (chip->full_soc_irq_enabled) {
+			disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = false;
+		}
+		disable_irq(chip->batt_irq[BATT_MISSING].irq);
+		if (chip->vbat_low_irq_enabled) {
+			disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = false;
+		}
+		if (!chip->use_vbat_low_empty_soc) {
+			disable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+			disable_irq_nosync(chip->soc_irq[EMPTY_SOC].irq);
+		}
+		chip->irqs_enabled = false;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("FG interrupts are %sabled\n", enable ? "en" : "dis");
+}
+
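+/*
+ * Kick off IMA error recovery: disable the FG interrupts, flag that the
+ * cached CC_SOC value must be restored and schedule ima_error_recovery_work.
+ * Does nothing if a recovery is already in progress.
+ */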
+static void fg_check_ima_error_handling(struct fg_chip *chip)
+{
+	if (chip->ima_error_handling) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("IMA error is handled already!\n");
+		return;
+	}
+	mutex_lock(&chip->ima_recovery_lock);
+	fg_enable_irqs(chip, false);
+	chip->use_last_cc_soc = true;
+	chip->ima_error_handling = true;
+	if (!work_pending(&chip->ima_error_recovery_work))
+		schedule_work(&chip->ima_error_recovery_work);
+	mutex_unlock(&chip->ima_recovery_lock);
+}
+
+#define SOC_ALG_ST		0xCF
+#define FGXCT_PRD		BIT(7)
+#define ALG_ST_CHECK_COUNT	20
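+/*
+ * Detect an FG algorithm lockup by polling IMA_OPR_STS and SOC_ALG_ST up to
+ * ALG_ST_CHECK_COUNT times at ~10ms intervals. If FGXCT_PRD stays set and
+ * SOC_ALG_ST never changes across all iterations, the algorithm is treated
+ * as stuck and IMA error recovery is triggered.
+ */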
+static int fg_check_alg_status(struct fg_chip *chip)
+{
+	int rc = 0, timeout = ALG_ST_CHECK_COUNT, count = 0;
+	u8 ima_opr_sts, alg_sts = 0, temp = 0;
+
+	if (!fg_reset_on_lockup)  {
+		pr_info("FG lockup detection cannot be run\n");
+		return 0;
+	}
+
+	rc = fg_read(chip, &alg_sts, chip->soc_base + SOC_ALG_ST, 1);
+	if (rc) {
+		pr_err("Error in reading SOC_ALG_ST, rc=%d\n", rc);
+		return rc;
+	}
+
+	while (1) {
+		rc = fg_read(chip, &ima_opr_sts,
+			chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+		if (!rc && !(ima_opr_sts & FGXCT_PRD))
+			break;
+
+		if (rc) {
+			pr_err("Error in reading IMA_OPR_STS, rc=%d\n",
+				rc);
+			break;
+		}
+
+		rc = fg_read(chip, &temp, chip->soc_base + SOC_ALG_ST,
+			1);
+		if (rc) {
+			pr_err("Error in reading SOC_ALG_ST, rc=%d\n",
+				rc);
+			break;
+		}
+
+		if ((ima_opr_sts & FGXCT_PRD) && (temp == alg_sts))
+			count++;
+
+		/* Wait for ~10ms while polling ALG_ST & IMA_OPR_STS */
+		usleep_range(9000, 11000);
+
+		if (!(--timeout))
+			break;
+	}
+
+	if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+		pr_info("ima_opr_sts: %x  alg_sts: %x count=%d\n", ima_opr_sts,
+			alg_sts, count);
+
+	if (count == ALG_ST_CHECK_COUNT) {
+		/* If we are here, that means FG ALG is stuck */
+		pr_err("ALG is stuck\n");
+		fg_check_ima_error_handling(chip);
+		rc = -EBUSY;
+	}
 	return rc;
 }
 
@@ -1122,19 +1491,25 @@
 	while (1) {
 		rc = fg_read(chip, &ima_opr_sts,
 			chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
-		if (!rc && (ima_opr_sts & IMA_IACS_RDY))
+		if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
 			break;
+		} else {
+			if (!(--timeout) || rc)
+				break;
 
-		if (!(--timeout) || rc)
-			break;
-		/* delay for iacs_ready to be asserted */
-		usleep_range(5000, 7000);
+			/* delay for iacs_ready to be asserted */
+			usleep_range(5000, 7000);
+		}
 	}
 
 	if (!timeout || rc) {
-		pr_err("IACS_RDY not set\n");
+		pr_err("IACS_RDY not set, ima_opr_sts: %x\n", ima_opr_sts);
+		rc = fg_check_alg_status(chip);
+		if (rc && rc != -EBUSY)
+			pr_err("Couldn't check FG ALG status, rc=%d\n",
+				rc);
 		/* perform IACS_CLR sequence */
-		fg_check_ima_exception(chip);
+		fg_check_ima_exception(chip, false);
 		return -EBUSY;
 	}
 
@@ -1154,15 +1529,16 @@
 
 	while (len > 0) {
 		num_bytes = (offset + len) > BUF_LEN ?
-			(BUF_LEN - offset) : len;
+				(BUF_LEN - offset) : len;
 		/* write to byte_enable */
 		for (i = offset; i < (offset + num_bytes); i++)
 			byte_enable |= BIT(i);
 
 		rc = fg_write(chip, &byte_enable,
-			chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+				chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
 		if (rc) {
-			pr_err("Unable to write to byte_en_reg rc=%d\n", rc);
+			pr_err("Unable to write to byte_en_reg rc=%d\n",
+							rc);
 			return rc;
 		}
 			/* write data */
@@ -1193,12 +1569,13 @@
 
 		rc = fg_check_iacs_ready(chip);
 		if (rc) {
-			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			pr_err("IACS_RDY failed post write to address %x offset %d rc=%d\n",
+				address, offset, rc);
 			return rc;
 		}
 
 		/* check for error condition */
-		rc = fg_check_ima_exception(chip);
+		rc = fg_check_ima_exception(chip, false);
 		if (rc) {
 			pr_err("IMA transaction failed rc=%d", rc);
 			return rc;
@@ -1239,12 +1616,13 @@
 
 		rc = fg_check_iacs_ready(chip);
 		if (rc) {
-			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			pr_err("IACS_RDY failed post read for address %x offset %d rc=%d\n",
+				address, offset, rc);
 			return rc;
 		}
 
 		/* check for error condition */
-		rc = fg_check_ima_exception(chip);
+		rc = fg_check_ima_exception(chip, false);
 		if (rc) {
 			pr_err("IMA transaction failed rc=%d", rc);
 			return rc;
@@ -1296,7 +1674,7 @@
 		 * clear, then return an error instead of waiting for it again.
 		 */
 		if  (time_count > 4) {
-			pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n");
+			pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
 			return -ETIMEDOUT;
 		}
 
@@ -1322,7 +1700,8 @@
 
 	rc = fg_check_iacs_ready(chip);
 	if (rc) {
-		pr_debug("IACS_RDY failed rc=%d\n", rc);
+		pr_err("IACS_RDY failed before setting address: %x offset: %d rc=%d\n",
+			address, offset, rc);
 		return rc;
 	}
 
@@ -1335,7 +1714,8 @@
 
 	rc = fg_check_iacs_ready(chip);
 	if (rc)
-		pr_debug("IACS_RDY failed rc=%d\n", rc);
+		pr_err("IACS_RDY failed after setting address: %x offset: %d rc=%d\n",
+			address, offset, rc);
 
 	return rc;
 }
@@ -1346,10 +1726,13 @@
 static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
 						int len, int offset)
 {
-	int rc = 0, orig_address = address;
+	int rc = 0, ret, orig_address = address;
 	u8 start_beat_count, end_beat_count, count = 0;
 	bool retry = false;
 
+	if (chip->fg_shutdown)
+		return -EINVAL;
+
 	if (offset > 3) {
 		pr_err("offset too large %d\n", offset);
 		return -EINVAL;
@@ -1372,11 +1755,22 @@
 	}
 
 	mutex_lock(&chip->rw_lock);
+	if (fg_debug_mask & FG_MEM_DEBUG_READS)
+		pr_info("Read for %d bytes is attempted @ 0x%x[%d]\n",
+			len, address, offset);
 
 retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Retried reading 3 times\n");
+		retry = false;
+		goto out;
+	}
+
 	rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
 	if (rc) {
 		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
@@ -1385,18 +1779,21 @@
 			chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
 	if (rc) {
 		pr_err("failed to read beat count rc=%d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
 	/* read data */
 	rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
 	if (rc) {
+		count++;
 		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
-			count++;
 			pr_err("IMA access failed retry_count = %d\n", count);
 			goto retry;
 		} else {
 			pr_err("failed to read SRAM address rc = %d\n", rc);
+			retry = true;
 			goto out;
 		}
 	}
@@ -1406,6 +1803,8 @@
 			chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
 	if (rc) {
 		pr_err("failed to read beat count rc=%d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
@@ -1418,12 +1817,13 @@
 		if (fg_debug_mask & FG_MEM_DEBUG_READS)
 			pr_info("Beat count do not match - retry transaction\n");
 		retry = true;
+		count++;
 	}
 out:
 	/* Release IMA access */
-	rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
-	if (rc)
-		pr_err("failed to reset IMA access bit rc = %d\n", rc);
+	ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+	if (ret)
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
 
 	if (retry) {
 		retry = false;
@@ -1439,8 +1839,12 @@
 static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
 							int len, int offset)
 {
-	int rc = 0, orig_address = address;
+	int rc = 0, ret, orig_address = address;
 	u8 count = 0;
+	bool retry = false;
+
+	if (chip->fg_shutdown)
+		return -EINVAL;
 
 	if (address < RAM_OFFSET)
 		return -EINVAL;
@@ -1455,32 +1859,49 @@
 	offset = (orig_address + offset) % 4;
 
 	mutex_lock(&chip->rw_lock);
+	if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+		pr_info("Write for %d bytes is attempted @ 0x%x[%d]\n",
+			len, address, offset);
 
 retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Retried writing 3 times\n");
+		retry = false;
+		goto out;
+	}
+
 	rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
 	if (rc) {
-		pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc);
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		retry = true;
+		count++;
 		goto out;
 	}
 
 	/* write data */
 	rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
 	if (rc) {
+		count++;
 		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
-			count++;
 			pr_err("IMA access failed retry_count = %d\n", count);
 			goto retry;
 		} else {
 			pr_err("failed to write SRAM address rc = %d\n", rc);
+			retry = true;
 			goto out;
 		}
 	}
 
 out:
 	/* Release IMA access */
-	rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
-	if (rc)
-		pr_err("failed to reset IMA access bit rc = %d\n", rc);
+	ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+	if (ret)
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
 
 	mutex_unlock(&chip->rw_lock);
 	fg_relax(&chip->memif_wakeup_source);
@@ -1490,6 +1911,9 @@
 static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
 			int len, int offset, bool keep_access)
 {
+	if (chip->block_sram_access)
+		return -EBUSY;
+
 	if (chip->ima_supported)
 		return fg_interleaved_mem_read(chip, val, address,
 						len, offset);
@@ -1501,6 +1925,9 @@
 static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
 		int len, int offset, bool keep_access)
 {
+	if (chip->block_sram_access)
+		return -EBUSY;
+
 	if (chip->ima_supported)
 		return fg_interleaved_mem_write(chip, val, address,
 						len, offset);
@@ -1538,6 +1965,62 @@
 	return rc;
 }
 
+static u8 sram_backup_buffer[100];
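+/*
+ * Save or restore the SRAM registers listed in fg_backup_regs[]. The scratch
+ * buffer above holds the data between the two passes; the table entries add
+ * up to 68 bytes, so 100 bytes leaves headroom.
+ */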
+static int fg_backup_sram_registers(struct fg_chip *chip, bool save)
+{
+	int rc, i, len, offset;
+	u16 address;
+	u8 *ptr;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("%sing SRAM registers\n", save ? "Back" : "Restor");
+
+	ptr = sram_backup_buffer;
+	for (i = 0; i < FG_BACKUP_MAX; i++) {
+		address = fg_backup_regs[i].address;
+		offset = fg_backup_regs[i].offset;
+		len = fg_backup_regs[i].len;
+		if (save)
+			rc = fg_interleaved_mem_read(chip, ptr, address,
+					len, offset);
+		else
+			rc = fg_interleaved_mem_write(chip, ptr, address,
+					len, offset);
+		if (rc) {
+			pr_err("Error in reading %d bytes from %x[%d], rc=%d\n",
+				len, address, offset, rc);
+			break;
+		}
+		ptr += len;
+	}
+
+	return rc;
+}
+
+#define SOC_FG_RESET	0xF3
+#define RESET_MASK	(BIT(7) | BIT(5))
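+/*
+ * Assert or de-assert the FG reset bits in SOC_FG_RESET. The full byte is
+ * written with RESET_MASK or 0 via fg_sec_masked_write() since the register
+ * requires the sec_access unlock.
+ */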
+static int fg_reset(struct fg_chip *chip, bool reset)
+{
+	int rc;
+
+	rc = fg_sec_masked_write(chip, chip->soc_base + SOC_FG_RESET,
+		0xFF, reset ? RESET_MASK : 0, 1);
+	if (rc)
+		pr_err("Error in writing to 0x%x, rc=%d\n", SOC_FG_RESET, rc);
+
+	return rc;
+}
+
+static void fg_handle_battery_insertion(struct fg_chip *chip)
+{
+	reinit_completion(&chip->batt_id_avail);
+	reinit_completion(&chip->fg_reset_done);
+	schedule_delayed_work(&chip->batt_profile_init, 0);
+	cancel_delayed_work(&chip->update_sram_data);
+	schedule_delayed_work(&chip->update_sram_data, msecs_to_jiffies(0));
+}
+
 static int soc_to_setpoint(int soc)
 {
 	return DIV_ROUND_CLOSEST(soc * 255, 100);
@@ -1550,6 +2033,7 @@
 	val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
 	data[0] = val & 0xFF;
 	data[1] = val >> 8;
 }
 
 static u8 batt_to_setpoint_8b(int vbatt_mv)
@@ -1678,14 +2162,37 @@
 	return rc;
 }
 
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+	int rc = 0;
+	u8 fg_batt_sts;
+
+	rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+	if (rc)
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+	else
+		*vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+
+	return rc;
+}
+
 #define SOC_EMPTY	BIT(3)
 static bool fg_is_batt_empty(struct fg_chip *chip)
 {
 	u8 fg_soc_sts;
 	int rc;
+	bool vbatt_low_sts;
 
-	rc = fg_read(chip, &fg_soc_sts,
-				 INT_RT_STS(chip->soc_base), 1);
+	if (chip->use_vbat_low_empty_soc) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+			return false;
+
+		return vbatt_low_sts;
+	}
+
+	rc = fg_read(chip, &fg_soc_sts, INT_RT_STS(chip->soc_base), 1);
 	if (rc) {
 		pr_err("spmi read failed: addr=%03X, rc=%d\n",
 				INT_RT_STS(chip->soc_base), rc);
@@ -1732,7 +2239,16 @@
 #define FULL_SOC_RAW		0xFF
 static int get_prop_capacity(struct fg_chip *chip)
 {
-	int msoc;
+	int msoc, rc;
+	bool vbatt_low_sts;
+
+	if (chip->use_last_soc && chip->last_soc) {
+		if (chip->last_soc == FULL_SOC_RAW)
+			return FULL_CAPACITY;
+		return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+				(FULL_CAPACITY - 2),
+				FULL_SOC_RAW - 2) + 1;
+	}
 
 	if (chip->battery_missing)
 		return MISSING_CAPACITY;
@@ -1747,10 +2263,28 @@
 		return EMPTY_CAPACITY;
 	}
 	msoc = get_monotonic_soc_raw(chip);
-	if (msoc == 0)
-		return EMPTY_CAPACITY;
-	else if (msoc == FULL_SOC_RAW)
+	if (msoc == 0) {
+		if (fg_reset_on_lockup && chip->use_vbat_low_empty_soc) {
+			rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+			if (rc) {
+				pr_err("Error in reading vbatt_status, rc=%d\n",
+					rc);
+				return EMPTY_CAPACITY;
+			}
+
+			if (!vbatt_low_sts)
+				return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+						(FULL_CAPACITY - 2),
+						FULL_SOC_RAW - 2) + 1;
+			else
+				return EMPTY_CAPACITY;
+		} else {
+			return EMPTY_CAPACITY;
+		}
+	} else if (msoc == FULL_SOC_RAW) {
 		return FULL_CAPACITY;
+	}
+
 	return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
 			FULL_SOC_RAW - 2) + 1;
 }
@@ -1843,6 +2377,25 @@
 	return 0;
 }
 
+#define IGNORE_FALSE_NEGATIVE_ISENSE_BIT	BIT(3)
+static int set_prop_ignore_false_negative_isense(struct fg_chip *chip,
+							bool ignore)
+{
+	int rc;
+
+	rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+			IGNORE_FALSE_NEGATIVE_ISENSE_BIT,
+			ignore ? IGNORE_FALSE_NEGATIVE_ISENSE_BIT : 0,
+			EXTERNAL_SENSE_OFFSET);
+	if (rc) {
+		pr_err("failed to %s isense false negative ignore rc=%d\n",
+				ignore ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 #define EXPONENT_MASK		0xF800
 #define MANTISSA_MASK		0x3FF
 #define SIGN			BIT(10)
@@ -1953,8 +2506,7 @@
 		return rc;
 	}
 
-	if (fg_debug_mask & FG_IRQS)
-		pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+	pr_debug("fg batt sts 0x%x\n", fg_batt_sts);
 
 	return (fg_batt_sts & BATT_IDED) ? 1 : 0;
 }
@@ -1984,7 +2536,7 @@
 #define DECIKELVIN	2730
 #define SRAM_PERIOD_NO_ID_UPDATE_MS	100
 #define FULL_PERCENT_28BIT		0xFFFFFFF
-static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+static int update_sram_data(struct fg_chip *chip, int *resched_ms)
 {
 	int i, j, rc = 0;
 	u8 reg[4];
@@ -2060,6 +2612,31 @@
 	}
 	fg_mem_release(chip);
 
+	/* Back up the registers whenever the update completes without error */
+	if (fg_reset_on_lockup && !chip->ima_error_handling) {
+		if (!rc) {
+			if (fg_debug_mask & FG_STATUS)
+				pr_info("backing up SRAM registers\n");
+			rc = fg_backup_sram_registers(chip, true);
+			if (rc) {
+				pr_err("Couldn't save sram registers\n");
+				goto out;
+			}
+			if (!chip->use_last_soc) {
+				chip->last_soc = get_monotonic_soc_raw(chip);
+				chip->last_cc_soc = div64_s64(
+					(int64_t)chip->last_soc *
+					FULL_PERCENT_28BIT, FULL_SOC_RAW);
+			}
+			if (fg_debug_mask & FG_STATUS)
+				pr_info("last_soc: %d last_cc_soc: %lld\n",
+					chip->last_soc, chip->last_cc_soc);
+		} else {
+			pr_err("update_sram failed\n");
+			goto out;
+		}
+	}
+
 	if (!rc)
 		get_current_time(&chip->last_sram_update_time);
 
@@ -2070,7 +2647,55 @@
 	} else {
 		*resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
 	}
+out:
 	fg_relax(&chip->update_sram_wakeup_source);
+	return rc;
+}
+
+#define SANITY_CHECK_PERIOD_MS	5000
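+/*
+ * Periodic FG sanity check: read the beat counter every 5 seconds. If it has
+ * not advanced, wait one more FG cycle and re-read; if it is still unchanged,
+ * assume the FG has locked up and trigger IMA error recovery.
+ */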
+static void check_sanity_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				check_sanity_work.work);
+	int rc = 0;
+	u8 beat_count;
+	bool tried_once = false;
+
+	fg_stay_awake(&chip->sanity_wakeup_source);
+
+try_again:
+	rc = fg_read(chip, &beat_count,
+			chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+	if (rc) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		goto resched;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("current: %d, prev: %d\n", beat_count,
+			chip->last_beat_count);
+
+	if (chip->last_beat_count == beat_count) {
+		if (!tried_once) {
+			/* Wait for 1 FG cycle and read it once again */
+			msleep(FG_CYCLE_MS);
+			tried_once = true;
+			goto try_again;
+		} else {
+			pr_err("Beat count not updating\n");
+			fg_check_ima_error_handling(chip);
+			goto out;
+		}
+	} else {
+		chip->last_beat_count = beat_count;
+	}
+resched:
+	schedule_delayed_work(
+		&chip->check_sanity_work,
+		msecs_to_jiffies(SANITY_CHECK_PERIOD_MS));
+out:
+	fg_relax(&chip->sanity_wakeup_source);
 }
 
 #define SRAM_TIMEOUT_MS			3000
@@ -2079,8 +2704,9 @@
 	struct fg_chip *chip = container_of(work,
 				struct fg_chip,
 				update_sram_data.work);
-	int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+	int resched_ms, ret;
 	bool tried_again = false;
+	int rc = 0;
 
 wait:
 	/* Wait for MEMIF access revoked */
@@ -2094,14 +2720,19 @@
 		goto wait;
 	} else if (ret <= 0) {
 		pr_err("transaction timed out ret=%d\n", ret);
+		if (fg_is_batt_id_valid(chip))
+			resched_ms = fg_sram_update_period_ms;
+		else
+			resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
 		goto out;
 	}
-	update_sram_data(chip, &resched_ms);
+	rc = update_sram_data(chip, &resched_ms);
 
 out:
-	schedule_delayed_work(
-		&chip->update_sram_data,
-		msecs_to_jiffies(resched_ms));
+	if (!rc)
+		schedule_delayed_work(
+			&chip->update_sram_data,
+			msecs_to_jiffies(resched_ms));
 }
 
 #define BATT_TEMP_OFFSET	3
@@ -2115,6 +2746,8 @@
 				TEMP_SENSE_CHARGE_BIT)
 #define TEMP_PERIOD_UPDATE_MS		10000
 #define TEMP_PERIOD_TIMEOUT_MS		3000
+#define BATT_TEMP_LOW_LIMIT		-600
+#define BATT_TEMP_HIGH_LIMIT		1500
 static void update_temp_data(struct work_struct *work)
 {
 	s16 temp;
@@ -2166,14 +2799,44 @@
 	}
 
 	temp = reg[0] | (reg[1] << 8);
-	fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
-		- DECIKELVIN;
+	temp = (temp * TEMP_LSB_16B / 1000) - DECIKELVIN;
+
+	/*
+	 * If the temperature is within the specified range (e.g. -60C to
+	 * 150C), report it to userspace. Otherwise, use the last good
+	 * temperature reading.
+	 */
+	if (temp > chip->batt_temp_low_limit &&
+			temp < chip->batt_temp_high_limit) {
+		chip->last_good_temp = temp;
+		fg_data[0].value = temp;
+	} else {
+		fg_data[0].value = chip->last_good_temp;
+
+		/*
+		 * If a temperature was read before and the last good value is
+		 * within the valid range, a bad reading now could be due to an
+		 * FG lockup. Trigger the FG reset sequence in that case.
+		 */
+		if (chip->last_temp_update_time && fg_reset_on_lockup &&
+			(chip->last_good_temp > chip->batt_temp_low_limit &&
+			chip->last_good_temp < chip->batt_temp_high_limit)) {
+			pr_err("Batt_temp is %d !, triggering FG reset\n",
+				temp);
+			fg_check_ima_error_handling(chip);
+		}
+	}
 
 	if (fg_debug_mask & FG_MEM_DEBUG_READS)
 		pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
 
 	get_current_time(&chip->last_temp_update_time);
 
+	if (chip->soc_slope_limiter_en) {
+		fg_stay_awake(&chip->slope_limit_wakeup_source);
+		schedule_work(&chip->slope_limiter_work);
+	}
+
 out:
 	if (chip->sw_rbias_ctrl) {
 		rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
@@ -2226,18 +2889,6 @@
 	return rc;
 }
 
-#define VBATT_LOW_STS_BIT BIT(2)
-static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
-{
-	int rc = 0;
-	u8 fg_batt_sts;
-
-	rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
-	if (!rc)
-		*vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
-	return rc;
-}
-
 #define BATT_CYCLE_NUMBER_REG		0x5E8
 #define BATT_CYCLE_OFFSET		0
 static void restore_cycle_counter(struct fg_chip *chip)
@@ -2301,6 +2952,9 @@
 			bucket, rc);
 	else
 		chip->cyc_ctr.count[bucket] = cyc_count;
+
+	if (fg_debug_mask & FG_POWER_SUPPLY)
+		pr_info("Stored bucket %d cyc_count: %d\n", bucket, cyc_count);
 	return rc;
 }
 
@@ -2416,6 +3070,62 @@
 	return ((int)val) * 1000;
 }
 
+#define SLOPE_LIMITER_COEFF_REG		0x430
+#define SLOPE_LIMITER_COEFF_OFFSET	3
+#define SLOPE_LIMIT_TEMP_THRESHOLD	100
+#define SLOPE_LIMIT_LOW_TEMP_CHG	45
+#define SLOPE_LIMIT_HIGH_TEMP_CHG	2
+#define SLOPE_LIMIT_LOW_TEMP_DISCHG	45
+#define SLOPE_LIMIT_HIGH_TEMP_DISCHG	2
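+/*
+ * Select one of the four slope limiter coefficients based on the charging
+ * status and whether the battery temperature is below slope_limit_temp,
+ * convert it to half-float format and write it to SLOPE_LIMITER_COEFF_REG.
+ * Nothing is written if the selected state has not changed.
+ */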
+static void slope_limiter_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+				slope_limiter_work);
+	enum slope_limit_status status;
+	int batt_temp, rc;
+	u8 buf[2];
+	int64_t val;
+
+	batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+	if (chip->status == POWER_SUPPLY_STATUS_CHARGING ||
+			chip->status == POWER_SUPPLY_STATUS_FULL) {
+		if (batt_temp < chip->slope_limit_temp)
+			status = LOW_TEMP_CHARGE;
+		else
+			status = HIGH_TEMP_CHARGE;
+	} else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		if (batt_temp < chip->slope_limit_temp)
+			status = LOW_TEMP_DISCHARGE;
+		else
+			status = HIGH_TEMP_DISCHARGE;
+	} else {
+		goto out;
+	}
+
+	if (status == chip->slope_limit_sts)
+		goto out;
+
+	val = chip->slope_limit_coeffs[status];
+	val *= MICRO_UNIT;
+	half_float_to_buffer(val, buf);
+	rc = fg_mem_write(chip, buf,
+			SLOPE_LIMITER_COEFF_REG, 2,
+			SLOPE_LIMITER_COEFF_OFFSET, 0);
+	if (rc) {
+		pr_err("Couldn't write to slope_limiter_coeff_reg, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	chip->slope_limit_sts = status;
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Slope limit sts: %d val: %lld buf[%x %x] written\n",
+			status, val, buf[0], buf[1]);
+out:
+	fg_relax(&chip->slope_limit_wakeup_source);
+}
+
 static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
 {
 	int64_t *coeffs;
@@ -2481,6 +3191,7 @@
 #define ESR_ACTUAL_REG		0x554
 #define BATTERY_ESR_REG		0x4F4
 #define TEMP_RS_TO_RSLOW_REG	0x514
+#define ESR_OFFSET		2
 static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
 {
 	int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
@@ -2519,7 +3230,7 @@
 
 	rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
 	esr_actual = half_float(buffer);
-	rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+	rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
 	battery_esr = half_float(buffer);
 
 	if (rc) {
@@ -2594,124 +3305,6 @@
 	estimate_battery_age(chip, &chip->actual_cap_uah);
 }
 
-static enum power_supply_property fg_power_props[] = {
-	POWER_SUPPLY_PROP_CAPACITY,
-	POWER_SUPPLY_PROP_CAPACITY_RAW,
-	POWER_SUPPLY_PROP_CURRENT_NOW,
-	POWER_SUPPLY_PROP_VOLTAGE_NOW,
-	POWER_SUPPLY_PROP_VOLTAGE_OCV,
-	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-	POWER_SUPPLY_PROP_CHARGE_NOW,
-	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
-	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
-	POWER_SUPPLY_PROP_CHARGE_FULL,
-	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
-	POWER_SUPPLY_PROP_TEMP,
-	POWER_SUPPLY_PROP_COOL_TEMP,
-	POWER_SUPPLY_PROP_WARM_TEMP,
-	POWER_SUPPLY_PROP_RESISTANCE,
-	POWER_SUPPLY_PROP_RESISTANCE_ID,
-	POWER_SUPPLY_PROP_BATTERY_TYPE,
-	POWER_SUPPLY_PROP_UPDATE_NOW,
-	POWER_SUPPLY_PROP_ESR_COUNT,
-	POWER_SUPPLY_PROP_VOLTAGE_MIN,
-	POWER_SUPPLY_PROP_CYCLE_COUNT,
-	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
-	POWER_SUPPLY_PROP_HI_POWER,
-};
-
-static int fg_power_get_property(struct power_supply *psy,
-				       enum power_supply_property psp,
-				       union power_supply_propval *val)
-{
-	struct fg_chip *chip = power_supply_get_drvdata(psy);
-	bool vbatt_low_sts;
-
-	switch (psp) {
-	case POWER_SUPPLY_PROP_BATTERY_TYPE:
-		if (chip->battery_missing)
-			val->strval = missing_batt_type;
-		else if (chip->fg_restarting)
-			val->strval = loading_batt_type;
-		else
-			val->strval = chip->batt_type;
-		break;
-	case POWER_SUPPLY_PROP_CAPACITY:
-		val->intval = get_prop_capacity(chip);
-		break;
-	case POWER_SUPPLY_PROP_CAPACITY_RAW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
-		val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
-		break;
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
-		val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
-		val->intval = chip->batt_max_voltage_uv;
-		break;
-	case POWER_SUPPLY_PROP_TEMP:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
-		break;
-	case POWER_SUPPLY_PROP_COOL_TEMP:
-		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
-		break;
-	case POWER_SUPPLY_PROP_WARM_TEMP:
-		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
-		break;
-	case POWER_SUPPLY_PROP_RESISTANCE:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
-		break;
-	case POWER_SUPPLY_PROP_ESR_COUNT:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
-		break;
-	case POWER_SUPPLY_PROP_CYCLE_COUNT:
-		val->intval = fg_get_cycle_count(chip);
-		break;
-	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
-		val->intval = chip->cyc_ctr.id;
-		break;
-	case POWER_SUPPLY_PROP_RESISTANCE_ID:
-		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
-		break;
-	case POWER_SUPPLY_PROP_UPDATE_NOW:
-		val->intval = 0;
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
-		if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
-			val->intval = (int)vbatt_low_sts;
-		else
-			val->intval = 1;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
-		val->intval = chip->nom_cap_uah;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_FULL:
-		val->intval = chip->learning_data.learned_cc_uah;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_NOW:
-		val->intval = chip->learning_data.cc_uah;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
-		val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
-		break;
-	case POWER_SUPPLY_PROP_HI_POWER:
-		val->intval = !!chip->bcl_lpm_disabled;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int correction_times[] = {
 	1470,
 	2940,
@@ -2853,11 +3446,8 @@
 		goto fail;
 	}
 
-	if (chip->wa_flag & USE_CC_SOC_REG) {
-		mutex_unlock(&chip->learning_data.learning_lock);
-		fg_relax(&chip->capacity_learning_wakeup_source);
-		return;
-	}
+	if (chip->wa_flag & USE_CC_SOC_REG)
+		goto fail;
 
 	fg_mem_lock(chip);
 
@@ -2888,6 +3478,8 @@
 		pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
 
 fail:
+	if (chip->wa_flag & USE_CC_SOC_REG)
+		fg_relax(&chip->capacity_learning_wakeup_source);
 	mutex_unlock(&chip->learning_data.learning_lock);
 	return;
 
@@ -2901,7 +3493,7 @@
 {
 	int rc;
 	u8 reg[4];
-	unsigned int temp, magnitude;
+	int temp;
 
 	rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
 	if (rc) {
@@ -2910,20 +3502,61 @@
 	}
 
 	temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
-	magnitude = temp & CC_SOC_MAGNITUDE_MASK;
-	if (temp & CC_SOC_NEGATIVE_BIT)
-		*cc_soc = -1 * (~magnitude + 1);
-	else
-		*cc_soc = magnitude;
-
+	*cc_soc = sign_extend32(temp, 29);
 	return 0;
 }
 
+static int fg_get_current_cc(struct fg_chip *chip)
+{
+	int cc_soc, rc;
+	int64_t current_capacity;
+
+	if (!(chip->wa_flag & USE_CC_SOC_REG))
+		return chip->learning_data.cc_uah;
+
+	if (!chip->learning_data.learned_cc_uah)
+		return -EINVAL;
+
+	rc = fg_get_cc_soc(chip, &cc_soc);
+	if (rc < 0) {
+		pr_err("Failed to get cc_soc, rc=%d\n", rc);
+		return rc;
+	}
+
+	current_capacity = cc_soc * chip->learning_data.learned_cc_uah;
+	current_capacity = div64_u64(current_capacity, FULL_PERCENT_28BIT);
+	return current_capacity;
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+	int rc;
+	u8 fg_batt_sts;
+
+	rc = fg_read(chip, &fg_batt_sts,
+				 INT_RT_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+		return false;
+	}
+
+	return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
 static int fg_cap_learning_process_full_data(struct fg_chip *chip)
 {
 	int cc_pc_val, rc = -EINVAL;
 	unsigned int cc_soc_delta_pc;
 	int64_t delta_cc_uah;
+	uint64_t temp;
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		pr_err("Battery is missing!\n");
+		goto fail;
+	}
 
 	if (!chip->learning_data.active)
 		goto fail;
@@ -2940,9 +3573,8 @@
 		goto fail;
 	}
 
-	cc_soc_delta_pc = DIV_ROUND_CLOSEST(
-			abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
-			* 100, FULL_PERCENT_28BIT);
+	temp = abs(cc_pc_val - chip->learning_data.init_cc_pc_val);
+	cc_soc_delta_pc = DIV_ROUND_CLOSEST_ULL(temp * 100, FULL_PERCENT_28BIT);
 
 	delta_cc_uah = div64_s64(
 			chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
@@ -2950,8 +3582,11 @@
 	chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
 
 	if (fg_debug_mask & FG_AGING)
-		pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+		pr_info("current cc_soc=%d cc_soc_pc=%d init_cc_pc_val=%d delta_cc_uah=%lld learned_cc_uah=%lld total_cc_uah = %lld\n",
 				cc_pc_val, cc_soc_delta_pc,
+				chip->learning_data.init_cc_pc_val,
+				delta_cc_uah,
+				chip->learning_data.learned_cc_uah,
 				chip->learning_data.cc_uah);
 
 	return 0;
@@ -3044,6 +3679,12 @@
 {
 	int16_t cc_mah;
 	int rc;
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		pr_err("Battery is missing!\n");
+		return;
+	}
 
 	cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
 
@@ -3065,6 +3706,12 @@
 static void fg_cap_learning_post_process(struct fg_chip *chip)
 {
 	int64_t max_inc_val, min_dec_val, old_cap;
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		pr_err("Battery is missing!\n");
+		return;
+	}
 
 	max_inc_val = chip->learning_data.learned_cc_uah
 			* (1000 + chip->learning_data.max_increment);
@@ -3083,6 +3730,32 @@
 		chip->learning_data.learned_cc_uah =
 			chip->learning_data.cc_uah;
 
+	if (chip->learning_data.max_cap_limit) {
+		max_inc_val = (int64_t)chip->nom_cap_uah * (1000 +
+				chip->learning_data.max_cap_limit);
+		max_inc_val = div64_u64(max_inc_val, 1000);
+		if (chip->learning_data.cc_uah > max_inc_val) {
+			if (fg_debug_mask & FG_AGING)
+				pr_info("learning capacity %lld goes above max limit %lld\n",
+					chip->learning_data.cc_uah,
+					max_inc_val);
+			chip->learning_data.learned_cc_uah = max_inc_val;
+		}
+	}
+
+	if (chip->learning_data.min_cap_limit) {
+		min_dec_val = (int64_t)chip->nom_cap_uah * (1000 -
+				chip->learning_data.min_cap_limit);
+		min_dec_val = div64_u64(min_dec_val, 1000);
+		if (chip->learning_data.cc_uah < min_dec_val) {
+			if (fg_debug_mask & FG_AGING)
+				pr_info("learning capacity %lld goes below min limit %lld\n",
+					chip->learning_data.cc_uah,
+					min_dec_val);
+			chip->learning_data.learned_cc_uah = min_dec_val;
+		}
+	}
+
 	fg_cap_learning_save_data(chip);
 	if (fg_debug_mask & FG_AGING)
 		pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
@@ -3142,7 +3815,7 @@
 		if (battery_soc * 100 / FULL_PERCENT_3B
 				> chip->learning_data.max_start_soc) {
 			if (fg_debug_mask & FG_AGING)
-				pr_info("battery soc too low (%d < %d), aborting\n",
+				pr_info("battery soc too high (%d > %d), aborting\n",
 					battery_soc * 100 / FULL_PERCENT_3B,
 					chip->learning_data.max_start_soc);
 			fg_mem_release(chip);
@@ -3226,6 +3899,17 @@
 		}
 
 		fg_cap_learning_stop(chip);
+	} else if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+		if (chip->wa_flag & USE_CC_SOC_REG) {
+			/* reset SW_CC_SOC register to 100% upon charge_full */
+			rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+				CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+			if (rc)
+				pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+								rc);
+			else if (fg_debug_mask & FG_STATUS)
+				pr_info("Reset SW_CC_SOC to full value\n");
+		}
 	}
 
 fail:
@@ -3323,7 +4007,14 @@
 				struct fg_chip,
 				status_change_work);
 	unsigned long current_time = 0;
-	int cc_soc, rc, capacity = get_prop_capacity(chip);
+	int cc_soc, batt_soc, rc, capacity = get_prop_capacity(chip);
+	bool batt_missing = is_battery_missing(chip);
+
+	if (batt_missing) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("Battery is missing\n");
+		return;
+	}
 
 	if (chip->esr_pulse_tune_en) {
 		fg_stay_awake(&chip->esr_extract_wakeup_source);
@@ -3343,19 +4034,34 @@
 	}
 	if (chip->status == POWER_SUPPLY_STATUS_FULL ||
 			chip->status == POWER_SUPPLY_STATUS_CHARGING) {
-		if (!chip->vbat_low_irq_enabled) {
+		if (!chip->vbat_low_irq_enabled &&
+				!chip->use_vbat_low_empty_soc) {
 			enable_irq(chip->batt_irq[VBATT_LOW].irq);
 			enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
 			chip->vbat_low_irq_enabled = true;
 		}
+
+		if (!chip->full_soc_irq_enabled) {
+			enable_irq(chip->soc_irq[FULL_SOC].irq);
+			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = true;
+		}
+
 		if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
 			fg_configure_soc(chip);
 	} else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
-		if (chip->vbat_low_irq_enabled) {
+		if (chip->vbat_low_irq_enabled &&
+				!chip->use_vbat_low_empty_soc) {
 			disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
 			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
 			chip->vbat_low_irq_enabled = false;
 		}
+
+		if (chip->full_soc_irq_enabled) {
+			disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = false;
+		}
 	}
 	fg_cap_learning_check(chip);
 	schedule_work(&chip->update_esr_work);
@@ -3368,6 +4074,42 @@
 	}
 
 	if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+		/*
+		 * Reset SW_CC_SOC to a value based off battery SOC when
+		 * the device is discharging.
+		 */
+		if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+			batt_soc = get_battery_soc_raw(chip);
+			if (!batt_soc)
+				return;
+
+			batt_soc = div64_s64((int64_t)batt_soc *
+					FULL_PERCENT_28BIT, FULL_PERCENT_3B);
+			rc = fg_mem_write(chip, (u8 *)&batt_soc,
+				CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+			if (rc)
+				pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+									rc);
+			else if (fg_debug_mask & FG_STATUS)
+				pr_info("Reset SW_CC_SOC to %x\n", batt_soc);
+		}
+
+		/*
+		 * Schedule the update_temp_work whenever there is a status
+		 * change. This is essential for applying the slope limiter
+		 * coefficients when that feature is enabled.
+		 */
+		if (chip->last_temp_update_time && chip->soc_slope_limiter_en) {
+			cancel_delayed_work_sync(&chip->update_temp_work);
+			schedule_delayed_work(&chip->update_temp_work,
+				msecs_to_jiffies(0));
+		}
+
+		if (chip->dischg_gain.enable) {
+			fg_stay_awake(&chip->dischg_gain_wakeup_source);
+			schedule_work(&chip->dischg_gain_work);
+		}
+
 		get_current_time(&current_time);
 		/*
 		 * When charging status changes, update SRAM parameters if it
@@ -3393,10 +4135,10 @@
 	}
 	if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
 			&& chip->safety_timer_expired) {
-		chip->sw_cc_soc_data.delta_soc =
-			DIV_ROUND_CLOSEST(abs(cc_soc -
-					chip->sw_cc_soc_data.init_cc_soc)
-					* 100, FULL_PERCENT_28BIT);
+		uint64_t delta_cc_soc = abs(cc_soc -
+					chip->sw_cc_soc_data.init_cc_soc);
+		chip->sw_cc_soc_data.delta_soc = DIV_ROUND_CLOSEST_ULL(
+				delta_cc_soc * 100, FULL_PERCENT_28BIT);
 		chip->sw_cc_soc_data.full_capacity =
 			chip->sw_cc_soc_data.delta_soc +
 			chip->sw_cc_soc_data.init_sys_soc;
@@ -3539,6 +4281,395 @@
 	return rc;
 }
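+/*
+ * Write the cached CC_SOC value back to SRAM and clear use_last_cc_soc once
+ * the restore succeeds. Nothing is done unless use_last_cc_soc is set.
+ */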
 
+static int fg_restore_cc_soc(struct fg_chip *chip)
+{
+	int rc;
+
+	if (!chip->use_last_cc_soc || !chip->last_cc_soc)
+		return 0;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restoring cc_soc: %lld\n", chip->last_cc_soc);
+
+	rc = fg_mem_write(chip, (u8 *)&chip->last_cc_soc,
+			fg_data[FG_DATA_CC_CHARGE].address, 4,
+			fg_data[FG_DATA_CC_CHARGE].offset, 0);
+	if (rc)
+		pr_err("failed to update CC_SOC rc=%d\n", rc);
+	else
+		chip->use_last_cc_soc = false;
+
+	return rc;
+}
+
+#define SRAM_MONOTONIC_SOC_REG		0x574
+#define SRAM_MONOTONIC_SOC_OFFSET	2
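+/*
+ * Restore the cached monotonic SOC: scale the 8-bit last_soc to the 16-bit
+ * format used by SRAM_MONOTONIC_SOC_REG and write it back. Nothing is done
+ * unless use_last_soc is set.
+ */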
+static int fg_restore_soc(struct fg_chip *chip)
+{
+	int rc;
+	u16 msoc;
+
+	if (chip->use_last_soc && chip->last_soc)
+		msoc = DIV_ROUND_CLOSEST(chip->last_soc * 0xFFFF,
+				FULL_SOC_RAW);
+	else
+		return 0;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restored soc: %d\n", msoc);
+
+	rc = fg_mem_write(chip, (u8 *)&msoc, SRAM_MONOTONIC_SOC_REG, 2,
+			SRAM_MONOTONIC_SOC_OFFSET, 0);
+	if (rc)
+		pr_err("failed to write M_SOC_REG rc=%d\n", rc);
+
+	return rc;
+}
+
+#define NOM_CAP_REG			0x4F4
+#define CAPACITY_DELTA_DECIPCT		500
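+/*
+ * Read the nominal capacity from SRAM and seed the capacity learning data:
+ * if no learned capacity exists yet, start from the nominal value; if
+ * feedback is enabled and the learned value deviates from nominal by more
+ * than 50% (CAPACITY_DELTA_DECIPCT), clamp it back to nominal, otherwise
+ * restore the cc_soc coefficient for the learned value.
+ */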
+static int load_battery_aging_data(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 buffer[2];
+	int16_t cc_mah;
+	int64_t delta_cc_uah, pct_nom_cap_uah;
+
+	rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+	if (rc) {
+		pr_err("Failed to read nominal capacitance: %d\n", rc);
+		goto out;
+	}
+
+	chip->nom_cap_uah = bcap_uah_2b(buffer);
+	chip->actual_cap_uah = chip->nom_cap_uah;
+
+	if (chip->learning_data.learned_cc_uah == 0) {
+		chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+		fg_cap_learning_save_data(chip);
+	} else if (chip->learning_data.feedback_on) {
+		delta_cc_uah = abs(chip->learning_data.learned_cc_uah -
+					chip->nom_cap_uah);
+		pct_nom_cap_uah = div64_s64((int64_t)chip->nom_cap_uah *
+				CAPACITY_DELTA_DECIPCT, 1000);
+		/*
+		 * If the learned capacity is out of range, say by 50%
+		 * from the nominal capacity, then overwrite the learned
+		 * capacity with the nominal capacity.
+		 */
+		if (chip->nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+			if (fg_debug_mask & FG_AGING) {
+				pr_info("learned_cc_uah: %lld is higher than expected\n",
+					chip->learning_data.learned_cc_uah);
+				pr_info("Capping it to nominal:%d\n",
+					chip->nom_cap_uah);
+			}
+			chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+			fg_cap_learning_save_data(chip);
+		} else {
+			cc_mah = div64_s64(chip->learning_data.learned_cc_uah,
+					1000);
+			rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+			if (rc)
+				pr_err("Error in restoring cc_soc_coeff, rc:%d\n",
+					rc);
+		}
+	}
+out:
+	return rc;
+}
+
+static void fg_restore_battery_info(struct fg_chip *chip)
+{
+	int rc;
+	char buf[4] = {0, 0, 0, 0};
+
+	chip->last_soc = DIV_ROUND_CLOSEST(chip->batt_info[BATT_INFO_SOC] *
+				FULL_SOC_RAW, FULL_CAPACITY);
+	chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+				FULL_PERCENT_28BIT, FULL_SOC_RAW);
+	chip->use_last_soc = true;
+	chip->use_last_cc_soc = true;
+	rc = fg_restore_soc(chip);
+	if (rc) {
+		pr_err("Error in restoring soc, rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_restore_cc_soc(chip);
+	if (rc) {
+		pr_err("Error in restoring cc_soc, rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_mem_write(chip, buf,
+			fg_data[FG_DATA_VINT_ERR].address,
+			fg_data[FG_DATA_VINT_ERR].len,
+			fg_data[FG_DATA_VINT_ERR].offset, 0);
+	if (rc) {
+		pr_err("Failed to write to VINT_ERR, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->learning_data.learned_cc_uah = chip->batt_info[BATT_INFO_FCC];
+	rc = load_battery_aging_data(chip);
+	if (rc) {
+		pr_err("Failed to load battery aging data, rc:%d\n", rc);
+		goto out;
+	}
+
+	if (chip->power_supply_registered)
+		power_supply_changed(chip->bms_psy);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restored battery info!\n");
+
+out:
+	return;
+}
+
+#define DELTA_BATT_TEMP		30
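+/*
+ * Sanity check the battery info cached by userspace before restoring it:
+ * every field must be populated, the battery ID must match, the temperature
+ * must be within DELTA_BATT_TEMP (3 degC), FCC must be non-negative and the
+ * SOC (or OCV when batt_range_ocv is set) delta must be within
+ * batt_range_pct.
+ */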
+static bool fg_validate_battery_info(struct fg_chip *chip)
+{
+	int i, delta_pct, batt_id_kohm, batt_temp, batt_volt_mv, batt_soc;
+
+	for (i = 1; i < BATT_INFO_MAX; i++) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_info[%d]: %d\n", i, chip->batt_info[i]);
+
+		if ((chip->batt_info[i] == 0 && i != BATT_INFO_TEMP) ||
+			chip->batt_info[i] == INT_MAX) {
+			if (fg_debug_mask & FG_STATUS)
+				pr_info("batt_info[%d]:%d is invalid\n", i,
+					chip->batt_info[i]);
+			return false;
+		}
+	}
+
+	batt_id_kohm = get_sram_prop_now(chip, FG_DATA_BATT_ID) / 1000;
+	if (batt_id_kohm != chip->batt_info[BATT_INFO_RES_ID]) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_id(%dK) does not match the stored batt_id(%dK)\n",
+				batt_id_kohm,
+				chip->batt_info[BATT_INFO_RES_ID]);
+		return false;
+	}
+
+	batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+	if (abs(chip->batt_info[BATT_INFO_TEMP] - batt_temp) >
+			DELTA_BATT_TEMP) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_temp(%d) is higher/lower than stored batt_temp(%d)\n",
+				batt_temp, chip->batt_info[BATT_INFO_TEMP]);
+		return false;
+	}
+
+	if (chip->batt_info[BATT_INFO_FCC] < 0) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("batt_fcc cannot be %d\n",
+				chip->batt_info[BATT_INFO_FCC]);
+		return false;
+	}
+
+	batt_volt_mv = get_sram_prop_now(chip, FG_DATA_VOLTAGE) / 1000;
+	batt_soc = get_monotonic_soc_raw(chip);
+	if (batt_soc != 0 && batt_soc != FULL_SOC_RAW)
+		batt_soc = DIV_ROUND_CLOSEST((batt_soc - 1) *
+				(FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1;
+
+	if (*chip->batt_range_ocv && chip->batt_max_voltage_uv > 1000)
+		delta_pct =  DIV_ROUND_CLOSEST(abs(batt_volt_mv -
+				chip->batt_info[BATT_INFO_VOLTAGE]) * 100,
+				chip->batt_max_voltage_uv / 1000);
+	else
+		delta_pct = abs(batt_soc - chip->batt_info[BATT_INFO_SOC]);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Validating by %s batt_voltage:%d capacity:%d delta_pct:%d\n",
+			*chip->batt_range_ocv ? "OCV" : "SOC", batt_volt_mv,
+			batt_soc, delta_pct);
+
+	if (*chip->batt_range_pct && delta_pct > *chip->batt_range_pct) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("delta_pct(%d) is higher than batt_range_pct(%d)\n",
+				delta_pct, *chip->batt_range_pct);
+		return false;
+	}
+
+	return true;
+}
+
+static int fg_set_battery_info(struct fg_chip *chip, int val)
+{
+	if (chip->batt_info_id < 0 ||
+			chip->batt_info_id >= BATT_INFO_MAX) {
+		pr_err("Invalid batt_info_id %d\n", chip->batt_info_id);
+		chip->batt_info_id = 0;
+		return -EINVAL;
+	}
+
+	if (chip->batt_info_id == BATT_INFO_NOTIFY && val == INT_MAX - 1) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("Notified from userspace\n");
+		if (chip->batt_info_restore && !chip->ima_error_handling) {
+			if (!fg_validate_battery_info(chip)) {
+				if (fg_debug_mask & FG_STATUS)
+					pr_info("Validating battery info failed\n");
+			} else {
+				fg_restore_battery_info(chip);
+			}
+		}
+	}
+
+	chip->batt_info[chip->batt_info_id] = val;
+	return 0;
+}
+
+static enum power_supply_property fg_power_props[] = {
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
+	POWER_SUPPLY_PROP_UPDATE_NOW,
+	POWER_SUPPLY_PROP_ESR_COUNT,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_HI_POWER,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+	POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
+	POWER_SUPPLY_PROP_BATTERY_INFO,
+	POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	struct fg_chip *chip =  power_supply_get_drvdata(psy);
+	bool vbatt_low_sts;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_BATTERY_TYPE:
+		if (chip->battery_missing)
+			val->strval = missing_batt_type;
+		else if (chip->fg_restarting)
+			val->strval = loading_batt_type;
+		else
+			val->strval = chip->batt_type;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = get_prop_capacity(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_RAW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+		val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = chip->batt_max_voltage_uv;
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+		break;
+	case POWER_SUPPLY_PROP_COOL_TEMP:
+		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+		break;
+	case POWER_SUPPLY_PROP_WARM_TEMP:
+		val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+		break;
+	case POWER_SUPPLY_PROP_ESR_COUNT:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		val->intval = fg_get_cycle_count(chip);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		val->intval = chip->cyc_ctr.id;
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_ID:
+		val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+		break;
+	case POWER_SUPPLY_PROP_UPDATE_NOW:
+		val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+			val->intval = (int)vbatt_low_sts;
+		else
+			val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		val->intval = chip->nom_cap_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		val->intval = chip->learning_data.learned_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		val->intval = chip->learning_data.cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+		val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		val->intval = fg_get_current_cc(chip);
+		break;
+	case POWER_SUPPLY_PROP_HI_POWER:
+		val->intval = !!chip->bcl_lpm_disabled;
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		val->intval = !!chip->soc_reporting_ready;
+		break;
+	case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+		val->intval = !chip->allow_false_negative_isense;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+		val->intval = chip->use_soft_jeita_irq;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO:
+		if (chip->batt_info_id < 0 ||
+				chip->batt_info_id >= BATT_INFO_MAX)
+			return -EINVAL;
+		val->intval = chip->batt_info[chip->batt_info_id];
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+		val->intval = chip->batt_info_id;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int fg_power_set_property(struct power_supply *psy,
 				  enum power_supply_property psp,
 				  const union power_supply_propval *val)
@@ -3557,6 +4688,67 @@
 		if (val->intval)
 			update_sram_data(chip, &unused);
 		break;
+	case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+		rc = set_prop_ignore_false_negative_isense(chip, !!val->intval);
+		if (rc)
+			pr_err("set_prop_ignore_false_negative_isense failed, rc=%d\n",
+							rc);
+		else
+			chip->allow_false_negative_isense = !val->intval;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+		if (chip->use_soft_jeita_irq == !!val->intval) {
+			pr_debug("JEITA irq already %s, ignore!\n",
+				chip->use_soft_jeita_irq ?
+				"enabled" : "disabled");
+			break;
+		}
+		chip->use_soft_jeita_irq = !!val->intval;
+		if (chip->use_soft_jeita_irq) {
+			if (chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+				enable_irq(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].disabled =
+								false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+				enable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].wakeup = true;
+			}
+			if (chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+				enable_irq(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].disabled = false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+				enable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].wakeup = true;
+			}
+		} else {
+			if (chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+				disable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].wakeup = false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+				disable_irq_nosync(
+					chip->batt_irq[JEITA_SOFT_COLD].irq);
+				chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+			}
+			if (chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+				disable_irq_wake(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].wakeup = false;
+			}
+			if (!chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+				disable_irq_nosync(
+					chip->batt_irq[JEITA_SOFT_HOT].irq);
+				chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+			}
+		}
+		break;
 	case POWER_SUPPLY_PROP_STATUS:
 		chip->prev_status = chip->status;
 		chip->status = val->intval;
@@ -3599,6 +4791,12 @@
 			schedule_work(&chip->bcl_hi_power_work);
 		}
 		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO:
+		rc = fg_set_battery_info(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+		chip->batt_info_id = val->intval;
+		break;
 	default:
 		return -EINVAL;
 	};
@@ -3613,6 +4811,8 @@
 	case POWER_SUPPLY_PROP_COOL_TEMP:
 	case POWER_SUPPLY_PROP_WARM_TEMP:
 	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+	case POWER_SUPPLY_PROP_BATTERY_INFO:
+	case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
 		return 1;
 	default:
 		break;
@@ -3807,21 +5007,197 @@
 	fg_relax(&chip->gain_comp_wakeup_source);
 }
 
-#define BATT_MISSING_STS BIT(6)
-static bool is_battery_missing(struct fg_chip *chip)
+static void cc_soc_store_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					cc_soc_store_work);
+	int cc_soc_pct;
+
+	if (!chip->nom_cap_uah) {
+		pr_err("nom_cap_uah zero!\n");
+		fg_relax(&chip->cc_soc_wakeup_source);
+		return;
+	}
+
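+	/*
+	 * Convert the coulomb counter charge to a percentage of the nominal
+	 * capacity and cache last_soc in 28-bit CC_SOC units. If the
+	 * percentage crosses the configured cc_soc_limit_pct while
+	 * reset-on-lockup is enabled, treat it as an IMA error.
+	 */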
+	cc_soc_pct = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+	cc_soc_pct = div64_s64(cc_soc_pct * 100,
+				chip->nom_cap_uah);
+	chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+				FULL_PERCENT_28BIT, FULL_SOC_RAW);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("cc_soc_pct: %d last_cc_soc: %lld\n", cc_soc_pct,
+			chip->last_cc_soc);
+
+	if (fg_reset_on_lockup && (chip->cc_soc_limit_pct > 0 &&
+			cc_soc_pct >= chip->cc_soc_limit_pct)) {
+		pr_err("CC_SOC out of range\n");
+		fg_check_ima_error_handling(chip);
+	}
+
+	fg_relax(&chip->cc_soc_wakeup_source);
+}
+
+#define HARD_JEITA_ALARM_CHECK_NS	10000000000
+static enum alarmtimer_restart fg_hard_jeita_alarm_cb(struct alarm *alarm,
+						ktime_t now)
+{
+	struct fg_chip *chip = container_of(alarm,
+			struct fg_chip, hard_jeita_alarm);
+	int rc, health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	u8 regval;
+	bool batt_hot, batt_cold;
+	union power_supply_propval val = {0, };
+
+	if (!is_usb_present(chip)) {
+		pr_debug("USB unplugged, stop the timer!\n");
+		return ALARMTIMER_NORESTART;
+	}
+
+	rc = fg_read(chip, &regval, BATT_INFO_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("read batt_sts failed, rc=%d\n", rc);
+		goto recheck;
+	}
+
+	batt_hot = !!(regval & JEITA_HARD_HOT_RT_STS);
+	batt_cold = !!(regval & JEITA_HARD_COLD_RT_STS);
+	if (batt_hot && batt_cold) {
+		pr_debug("Hot && cold can't co-exist\n");
+		goto recheck;
+	}
+
+	if ((batt_hot == chip->batt_hot) && (batt_cold == chip->batt_cold)) {
+		pr_debug("battery JEITA state not changed, ignore\n");
+		goto recheck;
+	}
+
+	if (batt_cold != chip->batt_cold) {
+		/* cool --> cold */
+		if (chip->batt_cool) {
+			chip->batt_cool = false;
+			chip->batt_cold = true;
+			health = POWER_SUPPLY_HEALTH_COLD;
+		} else if (chip->batt_cold) { /* cold --> cool */
+			chip->batt_cool = true;
+			chip->batt_cold = false;
+			health = POWER_SUPPLY_HEALTH_COOL;
+		}
+	}
+
+	if (batt_hot != chip->batt_hot) {
+		/* warm --> hot */
+		if (chip->batt_warm) {
+			chip->batt_warm = false;
+			chip->batt_hot = true;
+			health = POWER_SUPPLY_HEALTH_OVERHEAT;
+		} else if (chip->batt_hot) { /* hot --> warm */
+			chip->batt_hot = false;
+			chip->batt_warm = true;
+			health = POWER_SUPPLY_HEALTH_WARM;
+		}
+	}
+
+	if (health != POWER_SUPPLY_HEALTH_UNKNOWN) {
+		pr_debug("FG report battery health: %d\n", health);
+		val.intval = health;
+		rc = power_supply_set_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_HEALTH, &val);
+		if (rc)
+			pr_err("Set batt_psy health: %d failed\n", health);
+	}
+
+recheck:
+	alarm_forward_now(alarm, ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	return ALARMTIMER_RESTART;
+}
+
+#define BATT_SOFT_COLD_STS	BIT(0)
+#define BATT_SOFT_HOT_STS	BIT(1)
+static irqreturn_t fg_jeita_soft_hot_irq_handler(int irq, void *_chip)
 {
 	int rc;
-	u8 fg_batt_sts;
+	struct fg_chip *chip = _chip;
+	u8 regval;
+	bool batt_warm;
+	union power_supply_propval val = {0, };
 
-	rc = fg_read(chip, &fg_batt_sts,
-				 INT_RT_STS(chip->batt_base), 1);
+	if (!is_charger_available(chip))
+		return IRQ_HANDLED;
+
+	rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
 	if (rc) {
 		pr_err("spmi read failed: addr=%03X, rc=%d\n",
 				INT_RT_STS(chip->batt_base), rc);
-		return false;
+		return IRQ_HANDLED;
 	}
 
-	return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+	batt_warm = !!(regval & BATT_SOFT_HOT_STS);
+	if (chip->batt_warm == batt_warm) {
+		pr_debug("warm state not changed, ignore!\n");
+		return IRQ_HANDLED;
+	}
+
+	chip->batt_warm = batt_warm;
+	if (batt_warm) {
+		val.intval = POWER_SUPPLY_HEALTH_WARM;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* kick the alarm timer for hard hot polling */
+		alarm_start_relative(&chip->hard_jeita_alarm,
+				ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	} else {
+		val.intval = POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* cancel the alarm timer */
+		alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_jeita_soft_cold_irq_handler(int irq, void *_chip)
+{
+	int rc;
+	struct fg_chip *chip = _chip;
+	u8 regval;
+	bool batt_cool;
+	union power_supply_propval val = {0, };
+
+	if (!is_charger_available(chip))
+		return IRQ_HANDLED;
+
+	rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+		return IRQ_HANDLED;
+	}
+
+	batt_cool = !!(regval & BATT_SOFT_COLD_STS);
+	if (chip->batt_cool == batt_cool) {
+		pr_debug("cool state not changed, ignore\n");
+		return IRQ_HANDLED;
+	}
+
+	chip->batt_cool = batt_cool;
+	if (batt_cool) {
+		val.intval = POWER_SUPPLY_HEALTH_COOL;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* kick the alarm timer for hard cold polling */
+		alarm_start_relative(&chip->hard_jeita_alarm,
+				ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	} else {
+		val.intval = POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* cancel the alarm timer */
+		alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	}
+
+	return IRQ_HANDLED;
 }
 
 #define SOC_FIRST_EST_DONE	BIT(5)
@@ -3841,21 +5217,40 @@
 	return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
 }
 
+#define FG_EMPTY_DEBOUNCE_MS	1500
 static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
 {
 	struct fg_chip *chip = _chip;
-	int rc;
 	bool vbatt_low_sts;
 
 	if (fg_debug_mask & FG_IRQS)
 		pr_info("vbatt-low triggered\n");
 
-	if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
-		rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
-		if (rc) {
-			pr_err("error in reading vbatt_status, rc:%d\n", rc);
+	/* handle empty soc based on vbatt-low interrupt */
+	if (chip->use_vbat_low_empty_soc) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
 			goto out;
+
+		if (vbatt_low_sts) {
+			if (fg_debug_mask & FG_IRQS)
+				pr_info("Vbatt is low\n");
+			disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = false;
+			fg_stay_awake(&chip->empty_check_wakeup_source);
+			schedule_delayed_work(&chip->check_empty_work,
+				msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+		} else {
+			if (fg_debug_mask & FG_IRQS)
+				pr_info("Vbatt is high\n");
+			chip->soc_empty = false;
 		}
+		goto out;
+	}
+
+	if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+			goto out;
 		if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
 			if (fg_debug_mask & FG_IRQS)
 				pr_info("disabling vbatt_low irq\n");
@@ -3876,8 +5271,10 @@
 	bool batt_missing = is_battery_missing(chip);
 
 	if (batt_missing) {
+		fg_cap_learning_stop(chip);
 		chip->battery_missing = true;
 		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
 		chip->batt_type = default_batt_type;
 		mutex_lock(&chip->cyc_ctr.lock);
 		if (fg_debug_mask & FG_IRQS)
@@ -3885,17 +5282,10 @@
 		clear_cycle_counter(chip);
 		mutex_unlock(&chip->cyc_ctr.lock);
 	} else {
-		if (!chip->use_otp_profile) {
-			reinit_completion(&chip->batt_id_avail);
-			reinit_completion(&chip->first_soc_done);
-			schedule_delayed_work(&chip->batt_profile_init, 0);
-			cancel_delayed_work(&chip->update_sram_data);
-			schedule_delayed_work(
-				&chip->update_sram_data,
-				msecs_to_jiffies(0));
-		} else {
+		if (!chip->use_otp_profile)
+			fg_handle_battery_insertion(chip);
+		else
 			chip->battery_missing = false;
-		}
 	}
 
 	if (fg_debug_mask & FG_IRQS)
@@ -3943,7 +5333,7 @@
 {
 	struct fg_chip *chip = _chip;
 	u8 soc_rt_sts;
-	int rc;
+	int rc, msoc;
 
 	rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
 	if (rc) {
@@ -3954,6 +5344,37 @@
 	if (fg_debug_mask & FG_IRQS)
 		pr_info("triggered 0x%x\n", soc_rt_sts);
 
+	if (chip->dischg_gain.enable) {
+		fg_stay_awake(&chip->dischg_gain_wakeup_source);
+		schedule_work(&chip->dischg_gain_work);
+	}
+
+	if (chip->soc_slope_limiter_en) {
+		fg_stay_awake(&chip->slope_limit_wakeup_source);
+		schedule_work(&chip->slope_limiter_work);
+	}
+
+	/* Backup last soc every delta soc interrupt */
+	chip->use_last_soc = false;
+	if (fg_reset_on_lockup) {
+		if (!chip->ima_error_handling)
+			chip->last_soc = get_monotonic_soc_raw(chip);
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("last_soc: %d\n", chip->last_soc);
+
+		fg_stay_awake(&chip->cc_soc_wakeup_source);
+		schedule_work(&chip->cc_soc_store_work);
+	}
+
+	if (chip->use_vbat_low_empty_soc) {
+		msoc = get_monotonic_soc_raw(chip);
+		if (msoc == 0 || chip->soc_empty) {
+			fg_stay_awake(&chip->empty_check_wakeup_source);
+			schedule_delayed_work(&chip->check_empty_work,
+				msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+		}
+	}
+
 	schedule_work(&chip->battery_age_work);
 
 	if (chip->power_supply_registered)
@@ -3988,7 +5409,6 @@
 	return IRQ_HANDLED;
 }
 
-#define FG_EMPTY_DEBOUNCE_MS	1500
 static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
 {
 	struct fg_chip *chip = _chip;
@@ -4100,16 +5520,15 @@
 	fg_relax(&chip->resume_soc_wakeup_source);
 }
 
-
 #define OCV_COEFFS_START_REG		0x4C0
 #define OCV_JUNCTION_REG		0x4D8
-#define NOM_CAP_REG			0x4F4
 #define CUTOFF_VOLTAGE_REG		0x40C
 #define RSLOW_CFG_REG			0x538
 #define RSLOW_CFG_OFFSET		2
 #define RSLOW_THRESH_REG		0x52C
 #define RSLOW_THRESH_OFFSET		0
-#define TEMP_RS_TO_RSLOW_OFFSET		2
+#define RS_TO_RSLOW_CHG_OFFSET		2
+#define RS_TO_RSLOW_DISCHG_OFFSET	0
 #define RSLOW_COMP_REG			0x528
 #define RSLOW_COMP_C1_OFFSET		0
 #define RSLOW_COMP_C2_OFFSET		2
@@ -4117,7 +5536,6 @@
 {
 	u8 buffer[24];
 	int rc, i;
-	int16_t cc_mah;
 
 	fg_mem_lock(chip);
 	rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
@@ -4138,30 +5556,21 @@
 				chip->ocv_coeffs[8], chip->ocv_coeffs[9],
 				chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
 	}
-	rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
-	chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
-	rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
-	chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+	rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 2, 0, 0);
 	if (rc) {
 		pr_err("Failed to read ocv junctions: %d\n", rc);
 		goto done;
 	}
-	rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+
+	chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+	chip->ocv_junction_p2p3 = buffer[1] * 100 / 255;
+
+	rc = load_battery_aging_data(chip);
 	if (rc) {
-		pr_err("Failed to read nominal capacitance: %d\n", rc);
+		pr_err("Failed to load battery aging data, rc:%d\n", rc);
 		goto done;
 	}
-	chip->nom_cap_uah = bcap_uah_2b(buffer);
-	chip->actual_cap_uah = chip->nom_cap_uah;
-	if (chip->learning_data.learned_cc_uah == 0) {
-		chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
-		fg_cap_learning_save_data(chip);
-	} else if (chip->learning_data.feedback_on) {
-		cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
-		rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
-		if (rc)
-			pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
-	}
+
 	rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
 	if (rc) {
 		pr_err("Failed to read cutoff voltage: %d\n", rc);
@@ -4188,9 +5597,9 @@
 	}
 	chip->rslow_comp.rslow_thr = buffer[0];
 	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
-			RSLOW_THRESH_OFFSET, 0);
+			RS_TO_RSLOW_CHG_OFFSET, 0);
 	if (rc) {
-		pr_err("unable to read rs to rslow: %d\n", rc);
+		pr_err("unable to read rs to rslow_chg: %d\n", rc);
 		goto done;
 	}
 	memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
@@ -4207,6 +5616,68 @@
 	return rc;
 }
 
+static int fg_update_batt_rslow_settings(struct fg_chip *chip)
+{
+	int64_t rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr, rconn_uohm;
+	u8 buffer[2];
+	int rc;
+
+	rc = fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to read battery_esr: %d\n", rc);
+		goto done;
+	}
+	batt_esr = half_float(buffer);
+
+	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_DISCHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to read rs to rslow dischg: %d\n", rc);
+		goto done;
+	}
+	rs_to_rslow_dischg = half_float(buffer);
+
+	rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_CHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to read rs to rslow chg: %d\n", rc);
+		goto done;
+	}
+	rs_to_rslow_chg = half_float(buffer);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("rs_rslow_chg: %lld, rs_rslow_dischg: %lld, esr: %lld\n",
+			rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr);
+
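+	/*
+	 * Scale both rs_to_rslow coefficients by batt_esr / (batt_esr +
+	 * rconn) so that the configured connector resistance is folded into
+	 * the values written back to SRAM.
+	 */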
+	rconn_uohm = chip->rconn_mohm * 1000;
+	rs_to_rslow_dischg = div64_s64(rs_to_rslow_dischg * batt_esr,
+					batt_esr + rconn_uohm);
+	rs_to_rslow_chg = div64_s64(rs_to_rslow_chg * batt_esr,
+					batt_esr + rconn_uohm);
+
+	half_float_to_buffer(rs_to_rslow_chg, buffer);
+	rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_CHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to write rs_to_rslow_chg: %d\n", rc);
+		goto done;
+	}
+
+	half_float_to_buffer(rs_to_rslow_dischg, buffer);
+	rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+			RS_TO_RSLOW_DISCHG_OFFSET, 0);
+	if (rc) {
+		pr_err("unable to write rs_to_rslow_dischg: %d\n", rc);
+		goto done;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Modified rs_rslow_chg: %lld, rs_rslow_dischg: %lld\n",
+			rs_to_rslow_chg, rs_to_rslow_dischg);
+done:
+	return rc;
+}
+
 #define RSLOW_CFG_MASK		(BIT(2) | BIT(3) | BIT(4) | BIT(5))
 #define RSLOW_CFG_ON_VAL	(BIT(2) | BIT(3))
 #define RSLOW_THRESH_FULL_VAL	0xFF
@@ -4233,7 +5704,7 @@
 
 	half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
 	rc = fg_mem_write(chip, buffer,
-			TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+			TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
 	if (rc) {
 		pr_err("unable to write rs to rslow: %d\n", rc);
 		goto done;
@@ -4286,7 +5757,7 @@
 	}
 
 	rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
-			TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+			TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
 	if (rc) {
 		pr_err("unable to write rs to rslow: %d\n", rc);
 		goto done;
@@ -4510,6 +5981,58 @@
 	fg_relax(&chip->esr_extract_wakeup_source);
 }
 
+#define KI_COEFF_MEDC_REG		0x400
+#define KI_COEFF_MEDC_OFFSET		0
+#define KI_COEFF_HIGHC_REG		0x404
+#define KI_COEFF_HIGHC_OFFSET		0
+#define DEFAULT_MEDC_VOLTAGE_GAIN	3
+#define DEFAULT_HIGHC_VOLTAGE_GAIN	2
+static void discharge_gain_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+						dischg_gain_work);
+	u8 buf[2];
+	int capacity, rc, i;
+	int64_t medc_val = DEFAULT_MEDC_VOLTAGE_GAIN;
+	int64_t highc_val = DEFAULT_HIGHC_VOLTAGE_GAIN;
+
+	capacity = get_prop_capacity(chip);
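+	/*
+	 * While discharging, scan the gain table from the highest SOC
+	 * threshold down; the lowest-indexed entry whose threshold is at or
+	 * above the current capacity supplies the MEDC/HIGHC gains. The
+	 * defaults above are used otherwise.
+	 */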
+	if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		for (i = VOLT_GAIN_MAX - 1; i >= 0; i--) {
+			if (capacity <= chip->dischg_gain.soc[i]) {
+				medc_val = chip->dischg_gain.medc_gain[i];
+				highc_val = chip->dischg_gain.highc_gain[i];
+			}
+		}
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Capacity: %d, medc_gain: %lld highc_gain: %lld\n",
+			capacity, medc_val, highc_val);
+
+	medc_val *= MICRO_UNIT;
+	half_float_to_buffer(medc_val, buf);
+	rc = fg_mem_write(chip, buf, KI_COEFF_MEDC_REG, 2,
+				KI_COEFF_MEDC_OFFSET, 0);
+	if (rc)
+		pr_err("Couldn't write to ki_coeff_medc_reg, rc=%d\n", rc);
+	else if (fg_debug_mask & FG_STATUS)
+		pr_info("Value [%x %x] written to ki_coeff_medc\n", buf[0],
+			buf[1]);
+
+	highc_val *= MICRO_UNIT;
+	half_float_to_buffer(highc_val, buf);
+	rc = fg_mem_write(chip, buf, KI_COEFF_HIGHC_REG, 2,
+				KI_COEFF_HIGHC_OFFSET, 0);
+	if (rc)
+		pr_err("Couldn't write to ki_coeff_highc_reg, rc=%d\n", rc);
+	else if (fg_debug_mask & FG_STATUS)
+		pr_info("Value [%x %x] written to ki_coeff_highc\n", buf[0],
+			buf[1]);
+
+	fg_relax(&chip->dischg_gain_wakeup_source);
+}
+
 #define LOW_LATENCY			BIT(6)
 #define BATT_PROFILE_OFFSET		0x4C0
 #define PROFILE_INTEGRITY_REG		0x53C
@@ -4529,7 +6052,7 @@
 		pr_info("restarting fuel gauge...\n");
 
 try_again:
-	if (write_profile) {
+	if (write_profile && !chip->ima_error_handling) {
 		if (!chip->charging_disabled) {
 			pr_err("Charging not yet disabled!\n");
 			return -EINVAL;
@@ -4770,7 +6293,8 @@
 #define BATTERY_PSY_WAIT_MS		2000
 static int fg_batt_profile_init(struct fg_chip *chip)
 {
-	int rc = 0, ret, len, batt_id;
+	int rc = 0, ret;
+	int len, batt_id;
 	struct device_node *node = chip->pdev->dev.of_node;
 	struct device_node *batt_node, *profile_node;
 	const char *data, *batt_type_str;
@@ -4792,6 +6316,19 @@
 		goto no_profile;
 	}
 
+	/* Check whether the charger is ready */
+	if (!is_charger_available(chip))
+		goto reschedule;
+
+	/* Disable charging for a FG cycle before calculating vbat_in_range */
+	if (!chip->charging_disabled) {
+		rc = set_prop_enable_charging(chip, false);
+		if (rc)
+			pr_err("Failed to disable charging, rc=%d\n", rc);
+
+		goto update;
+	}
+
 	batt_node = of_find_node_by_name(node, "qcom,battery-data");
 	if (!batt_node) {
 		pr_warn("No available batterydata, using OTP defaults\n");
@@ -4808,8 +6345,12 @@
 							fg_batt_type);
 	if (IS_ERR_OR_NULL(profile_node)) {
 		rc = PTR_ERR(profile_node);
-		pr_err("couldn't find profile handle %d\n", rc);
-		goto no_profile;
+		if (rc == -EPROBE_DEFER) {
+			goto reschedule;
+		} else {
+			pr_err("couldn't find profile handle rc=%d\n", rc);
+			goto no_profile;
+		}
 	}
 
 	/* read rslow compensation values if they're available */
@@ -4903,18 +6444,6 @@
 		goto no_profile;
 	}
 
-	/* Check whether the charger is ready */
-	if (!is_charger_available(chip))
-		goto reschedule;
-
-	/* Disable charging for a FG cycle before calculating vbat_in_range */
-	if (!chip->charging_disabled) {
-		rc = set_prop_enable_charging(chip, false);
-		if (rc)
-			pr_err("Failed to disable charging, rc=%d\n", rc);
-
-		goto reschedule;
-	}
 
 	vbat_in_range = get_vbat_est_diff(chip)
 			< settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
@@ -4956,11 +6485,7 @@
 				chip->batt_profile, len, false);
 	}
 
-	if (chip->power_supply_registered)
-		power_supply_changed(chip->bms_psy);
-
 	memcpy(chip->batt_profile, data, len);
-
 	chip->batt_profile_len = len;
 
 	if (fg_debug_mask & FG_STATUS)
@@ -4995,6 +6520,11 @@
 		}
 	}
 
+	if (chip->rconn_mohm > 0) {
+		rc = fg_update_batt_rslow_settings(chip);
+		if (rc)
+			pr_err("Error in updating ESR, rc=%d\n", rc);
+	}
 done:
 	if (chip->charging_disabled) {
 		rc = set_prop_enable_charging(chip, true);
@@ -5008,8 +6538,22 @@
 		chip->batt_type = fg_batt_type;
 	else
 		chip->batt_type = batt_type_str;
+
+	if (chip->first_profile_loaded && fg_reset_on_lockup) {
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("restoring SRAM registers\n");
+		rc = fg_backup_sram_registers(chip, false);
+		if (rc)
+			pr_err("Couldn't restore sram registers\n");
+
+		/* Read the cycle counter back from FG SRAM */
+		if (chip->cyc_ctr.en)
+			restore_cycle_counter(chip);
+	}
+
 	chip->first_profile_loaded = true;
 	chip->profile_loaded = true;
+	chip->soc_reporting_ready = true;
 	chip->battery_missing = is_battery_missing(chip);
 	update_chg_iterm(chip);
 	update_cc_cv_setpoint(chip);
@@ -5025,8 +6569,10 @@
 	fg_relax(&chip->profile_wakeup_source);
 	pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
 		fg_data[FG_DATA_VOLTAGE].value);
+	complete_all(&chip->fg_reset_done);
 	return rc;
 no_profile:
+	chip->soc_reporting_ready = true;
 	if (chip->charging_disabled) {
 		rc = set_prop_enable_charging(chip, true);
 		if (rc)
@@ -5039,14 +6585,15 @@
 		power_supply_changed(chip->bms_psy);
 	fg_relax(&chip->profile_wakeup_source);
 	return rc;
-reschedule:
-	schedule_delayed_work(
-		&chip->batt_profile_init,
-		msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+update:
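+	/* Refresh SRAM data immediately, then fall through to reschedule */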
 	cancel_delayed_work(&chip->update_sram_data);
 	schedule_delayed_work(
 		&chip->update_sram_data,
 		msecs_to_jiffies(0));
+reschedule:
+	schedule_delayed_work(
+		&chip->batt_profile_init,
+		msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
 	fg_relax(&chip->profile_wakeup_source);
 	return 0;
 }
@@ -5056,14 +6603,41 @@
 	struct fg_chip *chip = container_of(work,
 				struct fg_chip,
 				check_empty_work.work);
+	bool vbatt_low_sts;
+	int msoc;
 
-	if (fg_is_batt_empty(chip)) {
+	/* handle empty soc based on vbatt-low interrupt */
+	if (chip->use_vbat_low_empty_soc) {
+		if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+			goto out;
+
+		msoc = get_monotonic_soc_raw(chip);
+
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("Vbatt_low: %d, msoc: %d\n", vbatt_low_sts,
+				msoc);
+		if (vbatt_low_sts || (msoc == 0))
+			chip->soc_empty = true;
+		else
+			chip->soc_empty = false;
+
+		if (chip->power_supply_registered)
+			power_supply_changed(chip->bms_psy);
+
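+		/* Re-arm the vbatt-low irq that was disabled in its handler */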
+		if (!chip->vbat_low_irq_enabled) {
+			enable_irq(chip->batt_irq[VBATT_LOW].irq);
+			enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+			chip->vbat_low_irq_enabled = true;
+		}
+	} else if (fg_is_batt_empty(chip)) {
 		if (fg_debug_mask & FG_STATUS)
 			pr_info("EMPTY SOC high\n");
 		chip->soc_empty = true;
 		if (chip->power_supply_registered)
 			power_supply_changed(chip->bms_psy);
 	}
+
+out:
 	fg_relax(&chip->empty_check_wakeup_source);
 }
 
@@ -5103,7 +6677,7 @@
 	int rc;
 	u8 buffer[3];
 	int bsoc;
-	int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+	int resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
 	bool disable = false;
 	u8 reg;
 
@@ -5318,6 +6892,98 @@
 	}								\
 } while (0)
 
+static int fg_dischg_gain_dt_init(struct fg_chip *chip)
+{
+	struct device_node *node = chip->pdev->dev.of_node;
+	struct property *prop;
+	int i, rc = 0;
+	size_t size;
+
+	prop = of_find_property(node, "qcom,fg-dischg-voltage-gain-soc",
+			NULL);
+	if (!prop) {
+		pr_err("qcom,fg-dischg-voltage-gain-soc not specified\n");
+		goto out;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size != VOLT_GAIN_MAX) {
+		pr_err("Voltage gain SOC specified is of incorrect size\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(node,
+		"qcom,fg-dischg-voltage-gain-soc", chip->dischg_gain.soc, size);
+	if (rc < 0) {
+		pr_err("Reading qcom,fg-dischg-voltage-gain-soc failed, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	for (i = 0; i < VOLT_GAIN_MAX; i++) {
+		if (chip->dischg_gain.soc[i] > 100) {
+			pr_err("Incorrect dischg-voltage-gain-soc\n");
+			goto out;
+		}
+	}
+
+	prop = of_find_property(node, "qcom,fg-dischg-med-voltage-gain",
+			NULL);
+	if (!prop) {
+		pr_err("qcom,fg-dischg-med-voltage-gain not specified\n");
+		goto out;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size != VOLT_GAIN_MAX) {
+		pr_err("med-voltage-gain specified is of incorrect size\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(node,
+		"qcom,fg-dischg-med-voltage-gain", chip->dischg_gain.medc_gain,
+		size);
+	if (rc < 0) {
+		pr_err("Reading qcom,fg-dischg-med-voltage-gain failed, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	prop = of_find_property(node, "qcom,fg-dischg-high-voltage-gain",
+			NULL);
+	if (!prop) {
+		pr_err("qcom,fg-dischg-high-voltage-gain not specified\n");
+		goto out;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size != VOLT_GAIN_MAX) {
+		pr_err("high-voltage-gain specified is of incorrect size\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(node,
+		"qcom,fg-dischg-high-voltage-gain",
+		chip->dischg_gain.highc_gain, size);
+	if (rc < 0) {
+		pr_err("Reading qcom,fg-dischg-high-voltage-gain failed, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	if (fg_debug_mask & FG_STATUS) {
+		for (i = 0; i < VOLT_GAIN_MAX; i++)
+			pr_info("SOC:%d MedC_Gain:%d HighC_Gain: %d\n",
+				chip->dischg_gain.soc[i],
+				chip->dischg_gain.medc_gain[i],
+				chip->dischg_gain.highc_gain[i]);
+	}
+	return 0;
+out:
+	chip->dischg_gain.enable = false;
+	return rc;
+}
+
 #define DEFAULT_EVALUATION_CURRENT_MA	1000
 static int fg_of_init(struct fg_chip *chip)
 {
@@ -5395,6 +7061,10 @@
 			"cl-max-start-capacity", rc, 15);
 	OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
 			"cl-vbat-est-thr-uv", rc, 40000);
+	OF_READ_PROPERTY(chip->learning_data.max_cap_limit,
+			"cl-max-limit-deciperc", rc, 0);
+	OF_READ_PROPERTY(chip->learning_data.min_cap_limit,
+			"cl-min-limit-deciperc", rc, 0);
 	OF_READ_PROPERTY(chip->evaluation_current,
 			"aging-eval-current-ma", rc,
 			DEFAULT_EVALUATION_CURRENT_MA);
@@ -5455,6 +7125,77 @@
 	chip->esr_pulse_tune_en = of_property_read_bool(node,
 					"qcom,esr-pulse-tuning-en");
 
+	chip->soc_slope_limiter_en = of_property_read_bool(node,
+					"qcom,fg-control-slope-limiter");
+	if (chip->soc_slope_limiter_en) {
+		OF_READ_PROPERTY(chip->slope_limit_temp,
+			"fg-slope-limit-temp-threshold", rc,
+			SLOPE_LIMIT_TEMP_THRESHOLD);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+			"fg-slope-limit-low-temp-chg", rc,
+			SLOPE_LIMIT_LOW_TEMP_CHG);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+			"fg-slope-limit-high-temp-chg", rc,
+			SLOPE_LIMIT_HIGH_TEMP_CHG);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+			"fg-slope-limit-low-temp-dischg", rc,
+			SLOPE_LIMIT_LOW_TEMP_DISCHG);
+
+		OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE],
+			"fg-slope-limit-high-temp-dischg", rc,
+			SLOPE_LIMIT_HIGH_TEMP_DISCHG);
+
+		if (fg_debug_mask & FG_STATUS)
+			pr_info("slope-limiter, temp: %d coeffs: [%d %d %d %d]\n",
+				chip->slope_limit_temp,
+				chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+				chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+				chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+				chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE]);
+	}
+
+	OF_READ_PROPERTY(chip->rconn_mohm, "fg-rconn-mohm", rc, 0);
+
+	chip->dischg_gain.enable = of_property_read_bool(node,
+					"qcom,fg-dischg-voltage-gain-ctrl");
+	if (chip->dischg_gain.enable) {
+		rc = fg_dischg_gain_dt_init(chip);
+		if (rc) {
+			pr_err("Error in reading dischg_gain parameters, rc=%d\n",
+				rc);
+			rc = 0;
+		}
+	}
+
+	chip->use_vbat_low_empty_soc = of_property_read_bool(node,
+					"qcom,fg-use-vbat-low-empty-soc");
+
+	OF_READ_PROPERTY(chip->batt_temp_low_limit,
+			"fg-batt-temp-low-limit", rc, BATT_TEMP_LOW_LIMIT);
+
+	OF_READ_PROPERTY(chip->batt_temp_high_limit,
+			"fg-batt-temp-high-limit", rc, BATT_TEMP_HIGH_LIMIT);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("batt-temp-low_limit: %d batt-temp-high_limit: %d\n",
+			chip->batt_temp_low_limit, chip->batt_temp_high_limit);
+
+	OF_READ_PROPERTY(chip->cc_soc_limit_pct, "fg-cc-soc-limit-pct", rc, 0);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("cc-soc-limit-pct: %d\n", chip->cc_soc_limit_pct);
+
+	chip->batt_info_restore = of_property_read_bool(node,
+					"qcom,fg-restore-batt-info");
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("restore: %d validate_by_ocv: %d range_pct: %d\n",
+			chip->batt_info_restore, fg_batt_valid_ocv,
+			fg_batt_range_pct);
+
 	return rc;
 }
 
@@ -5528,15 +7269,22 @@
 					chip->soc_irq[FULL_SOC].irq, rc);
 				return rc;
 			}
-			rc = devm_request_irq(chip->dev,
-				chip->soc_irq[EMPTY_SOC].irq,
-				fg_empty_soc_irq_handler,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				"empty-soc", chip);
-			if (rc < 0) {
-				pr_err("Can't request %d empty-soc: %d\n",
-					chip->soc_irq[EMPTY_SOC].irq, rc);
-				return rc;
+			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+			chip->full_soc_irq_enabled = true;
+
+			if (!chip->use_vbat_low_empty_soc) {
+				rc = devm_request_irq(chip->dev,
+					chip->soc_irq[EMPTY_SOC].irq,
+					fg_empty_soc_irq_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING,
+					"empty-soc", chip);
+				if (rc < 0) {
+					pr_err("Can't request %d empty-soc: %d\n",
+						chip->soc_irq[EMPTY_SOC].irq,
+						rc);
+					return rc;
+				}
 			}
 			rc = devm_request_irq(chip->dev,
 				chip->soc_irq[DELTA_SOC].irq,
@@ -5558,8 +7306,8 @@
 			}
 
 			enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
-			enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
-			enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+			if (!chip->use_vbat_low_empty_soc)
+				enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
 			break;
 		case FG_MEMIF:
 			chip->mem_irq[FG_MEM_AVAIL].irq
@@ -5581,8 +7329,53 @@
 			}
 			break;
 		case FG_BATT:
-			chip->batt_irq[BATT_MISSING].irq
-				= of_irq_get_byname(child, "batt-missing");
+			chip->batt_irq[JEITA_SOFT_COLD].irq =
+				of_irq_get_byname(child, "soft-cold");
+			if (chip->batt_irq[JEITA_SOFT_COLD].irq < 0) {
+				pr_err("Unable to get soft-cold irq\n");
+				rc = -EINVAL;
+				return rc;
+			}
+			rc = devm_request_threaded_irq(chip->dev,
+					chip->batt_irq[JEITA_SOFT_COLD].irq,
+					NULL,
+					fg_jeita_soft_cold_irq_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING |
+					IRQF_ONESHOT,
+					"soft-cold", chip);
+			if (rc < 0) {
+				pr_err("Can't request %d soft-cold: %d\n",
+					chip->batt_irq[JEITA_SOFT_COLD].irq,
+								rc);
+				return rc;
+			}
+			disable_irq(chip->batt_irq[JEITA_SOFT_COLD].irq);
+			chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+			chip->batt_irq[JEITA_SOFT_HOT].irq =
+				of_irq_get_byname(child, "soft-hot");
+			if (chip->batt_irq[JEITA_SOFT_HOT].irq < 0) {
+				pr_err("Unable to get soft-hot irq\n");
+				rc = -EINVAL;
+				return rc;
+			}
+			rc = devm_request_threaded_irq(chip->dev,
+					chip->batt_irq[JEITA_SOFT_HOT].irq,
+					NULL,
+					fg_jeita_soft_hot_irq_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING |
+					IRQF_ONESHOT,
+					"soft-hot", chip);
+			if (rc < 0) {
+				pr_err("Can't request %d soft-hot: %d\n",
+					chip->batt_irq[JEITA_SOFT_HOT].irq, rc);
+				return rc;
+			}
+			disable_irq(chip->batt_irq[JEITA_SOFT_HOT].irq);
+			chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+			chip->batt_irq[BATT_MISSING].irq =
+				of_irq_get_byname(child, "batt-missing");
 			if (chip->batt_irq[BATT_MISSING].irq < 0) {
 				pr_err("Unable to get batt-missing irq\n");
 				rc = -EINVAL;
@@ -5619,8 +7412,14 @@
 					chip->batt_irq[VBATT_LOW].irq, rc);
 				return rc;
 			}
-			disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
-			chip->vbat_low_irq_enabled = false;
+			if (chip->use_vbat_low_empty_soc) {
+				enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+				chip->vbat_low_irq_enabled = true;
+			} else {
+				disable_irq_nosync(
+					chip->batt_irq[VBATT_LOW].irq);
+				chip->vbat_low_irq_enabled = false;
+			}
 			break;
 		case FG_ADC:
 			break;
@@ -5630,17 +7429,22 @@
 		}
 	}
 
+	chip->irqs_enabled = true;
 	return rc;
 }
 
-static void fg_cleanup(struct fg_chip *chip)
+static void fg_cancel_all_works(struct fg_chip *chip)
 {
+	cancel_delayed_work_sync(&chip->check_sanity_work);
 	cancel_delayed_work_sync(&chip->update_sram_data);
 	cancel_delayed_work_sync(&chip->update_temp_work);
 	cancel_delayed_work_sync(&chip->update_jeita_setting);
 	cancel_delayed_work_sync(&chip->check_empty_work);
 	cancel_delayed_work_sync(&chip->batt_profile_init);
 	alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+	alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	if (!chip->ima_error_handling)
+		cancel_work_sync(&chip->ima_error_recovery_work);
 	cancel_work_sync(&chip->rslow_comp_work);
 	cancel_work_sync(&chip->set_resume_soc_work);
 	cancel_work_sync(&chip->fg_cap_learning_work);
@@ -5652,12 +7456,23 @@
 	cancel_work_sync(&chip->gain_comp_work);
 	cancel_work_sync(&chip->init_work);
 	cancel_work_sync(&chip->charge_full_work);
+	cancel_work_sync(&chip->bcl_hi_power_work);
 	cancel_work_sync(&chip->esr_extract_config_work);
+	cancel_work_sync(&chip->slope_limiter_work);
+	cancel_work_sync(&chip->dischg_gain_work);
+	cancel_work_sync(&chip->cc_soc_store_work);
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+	fg_cancel_all_works(chip);
+	power_supply_unregister(chip->bms_psy);
 	mutex_destroy(&chip->rslow_comp.lock);
 	mutex_destroy(&chip->rw_lock);
 	mutex_destroy(&chip->cyc_ctr.lock);
 	mutex_destroy(&chip->learning_data.learning_lock);
 	mutex_destroy(&chip->sysfs_restart_lock);
+	mutex_destroy(&chip->ima_recovery_lock);
 	wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
 	wakeup_source_trash(&chip->empty_check_wakeup_source.source);
 	wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -5667,6 +7482,11 @@
 	wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
 	wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
 	wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+	wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+	wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+	wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+	wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+	wakeup_source_trash(&chip->sanity_wakeup_source.source);
 }
 
 static int fg_remove(struct platform_device *pdev)
@@ -6155,12 +7975,13 @@
 	return 0;
 }
 
-#define FG_ALG_SYSCTL_1	0x4B0
-#define SOC_CNFG	0x450
-#define SOC_DELTA_OFFSET	3
-#define DELTA_SOC_PERCENT	1
-#define I_TERM_QUAL_BIT		BIT(1)
-#define PATCH_NEG_CURRENT_BIT	BIT(3)
+#define FG_ALG_SYSCTL_1			0x4B0
+#define SOC_CNFG			0x450
+#define SOC_DELTA_OFFSET		3
+#define DELTA_SOC_PERCENT		1
+#define ALERT_CFG_OFFSET		3
+#define I_TERM_QUAL_BIT			BIT(1)
+#define PATCH_NEG_CURRENT_BIT		BIT(3)
 #define KI_COEFF_PRED_FULL_ADDR		0x408
 #define KI_COEFF_PRED_FULL_4_0_MSB	0x88
 #define KI_COEFF_PRED_FULL_4_0_LSB	0x00
@@ -6168,6 +7989,12 @@
 #define FG_ADC_CONFIG_REG		0x4B8
 #define FG_BCL_CONFIG_OFFSET		0x3
 #define BCL_FORCED_HPM_IN_CHARGE	BIT(2)
+#define IRQ_USE_VOLTAGE_HYST_BIT	BIT(0)
+#define EMPTY_FROM_VOLTAGE_BIT		BIT(1)
+#define EMPTY_FROM_SOC_BIT		BIT(2)
+#define EMPTY_SOC_IRQ_MASK		(IRQ_USE_VOLTAGE_HYST_BIT | \
+					EMPTY_FROM_SOC_BIT | \
+					EMPTY_FROM_VOLTAGE_BIT)
 static int fg_common_hw_init(struct fg_chip *chip)
 {
 	int rc;
@@ -6176,8 +8003,9 @@
 
 	update_iterm(chip);
 	update_cutoff_voltage(chip);
-	update_irq_volt_empty(chip);
 	update_bcl_thresholds(chip);
+	if (!chip->use_vbat_low_empty_soc)
+		update_irq_volt_empty(chip);
 
 	resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
 	if (resume_soc_raw > 0) {
@@ -6207,6 +8035,11 @@
 		return rc;
 	}
 
+	/* Override the voltage threshold for vbatt_low with empty_volt */
+	if (chip->use_vbat_low_empty_soc)
+		settings[FG_MEM_BATT_LOW].value =
+			settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
 	rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
 			batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
 			settings[FG_MEM_BATT_LOW].offset);
@@ -6274,20 +8107,41 @@
 		if (fg_debug_mask & FG_STATUS)
 			pr_info("imptr_pulse_slow is %sabled\n",
 				chip->imptr_pulse_slow_en ? "en" : "dis");
+	}
 
-		rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
-				0);
-		if (rc) {
-			pr_err("unable to read rslow cfg: %d\n", rc);
-			return rc;
-		}
+	rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+			0);
+	if (rc) {
+		pr_err("unable to read rslow cfg: %d\n", rc);
+		return rc;
+	}
 
-		if (val & RSLOW_CFG_ON_VAL)
-			chip->rslow_comp.active = true;
+	if (val & RSLOW_CFG_ON_VAL)
+		chip->rslow_comp.active = true;
 
-		if (fg_debug_mask & FG_STATUS)
-			pr_info("rslow_comp active is %sabled\n",
-				chip->rslow_comp.active ? "en" : "dis");
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("rslow_comp active is %sabled\n",
+			chip->rslow_comp.active ? "en" : "dis");
+
+	/*
+	 * Clear bits 0-2 in 0x4B3 and set them again to make empty_soc irq
+	 * trigger again.
+	 */
+	rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+			0, ALERT_CFG_OFFSET);
+	if (rc) {
+		pr_err("failed to clear empty_soc irq bits in 0x4B3 rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Wait for a FG cycle before enabling empty soc irq configuration */
+	msleep(FG_CYCLE_MS);
+
+	rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+			EMPTY_SOC_IRQ_MASK, ALERT_CFG_OFFSET);
+	if (rc) {
+		pr_err("failed to set empty_soc irq bits in 0x4B3 rc=%d\n", rc);
+		return rc;
 	}
 
 	return 0;
@@ -6414,12 +8268,13 @@
 		/* Setup workaround flag based on PMIC type */
 		if (fg_sense_type == INTERNAL_CURRENT_SENSE)
 			chip->wa_flag |= IADC_GAIN_COMP_WA;
-		if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+		if (chip->pmic_revision[REVID_DIG_MAJOR] >= 1)
 			chip->wa_flag |= USE_CC_SOC_REG;
 
 		break;
 	case PMI8950:
 	case PMI8937:
+	case PMI8940:
 		rc = fg_8950_hw_init(chip);
 		/* Setup workaround flag based on PMIC type */
 		chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
@@ -6438,12 +8293,223 @@
 	return rc;
 }
 
+static int fg_init_iadc_config(struct fg_chip *chip)
+{
+	u8 reg[2];
+	int rc;
+
+	/* read default gain config */
+	rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+	if (rc) {
+		pr_err("Failed to read default gain rc=%d\n", rc);
+		return rc;
+	}
+
+	if (reg[1] || reg[0]) {
+		/*
+		 * Default gain register has a valid value:
+		 * - write to gain register.
+		 */
+		rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+						GAIN_OFFSET, 0);
+		if (rc) {
+			pr_err("Failed to write gain rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		/*
+		 * Default gain register is invalid:
+		 * - read gain register for default gain value
+		 * - write to default gain register.
+		 */
+		rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+						GAIN_OFFSET, 0);
+		if (rc) {
+			pr_err("Failed to read gain rc=%d\n", rc);
+			return rc;
+		}
+		rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+						DEF_GAIN_OFFSET, 0);
+		if (rc) {
+			pr_err("Failed to write default gain rc=%d\n",
+								rc);
+			return rc;
+		}
+	}
+
+	chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+	chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+	chip->iadc_comp_data.dfl_gain = half_float(reg);
+
+	pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+		       reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+	return 0;
+}
+
+#define EN_WR_FGXCT_PRD		BIT(6)
+#define EN_RD_FGXCT_PRD		BIT(5)
+#define FG_RESTART_TIMEOUT_MS	12000
+static void ima_error_recovery_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				ima_error_recovery_work);
+	bool tried_again = false;
+	int rc;
+	u8 buf[4] = {0, 0, 0, 0};
+
+	fg_stay_awake(&chip->fg_reset_wakeup_source);
+	mutex_lock(&chip->ima_recovery_lock);
+	if (!chip->ima_error_handling) {
+		pr_err("Scheduled by mistake?\n");
+		mutex_unlock(&chip->ima_recovery_lock);
+		fg_relax(&chip->fg_reset_wakeup_source);
+		return;
+	}
+
+	/*
+	 * Report the last known SOC until the error recovery completes.
+	 * Without this, there could be a fluctuation in the SOC values
+	 * notified to userspace.
+	 */
+	chip->use_last_soc = true;
+
+	/* Block SRAM access till FG reset is complete */
+	chip->block_sram_access = true;
+
+	/* Release the mutex to avoid deadlock while cancelling the works */
+	mutex_unlock(&chip->ima_recovery_lock);
+
+	/* Cancel all the works */
+	fg_cancel_all_works(chip);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("last_soc: %d\n", chip->last_soc);
+
+	mutex_lock(&chip->ima_recovery_lock);
+	/* Acquire IMA access forcibly from FG ALG */
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+			EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD,
+			EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 1);
+	if (rc) {
+		pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* Release the IMA access now so that FG reset can go through */
+	rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+			EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 0, 1);
+	if (rc) {
+		pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("resetting FG\n");
+
+	/* Assert FG reset */
+	rc = fg_reset(chip, true);
+	if (rc) {
+		pr_err("Couldn't reset FG\n");
+		goto out;
+	}
+
+	/* Wait for a small time before deasserting FG reset */
+	msleep(100);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("clearing FG from reset\n");
+
+	/* Deassert FG reset */
+	rc = fg_reset(chip, false);
+	if (rc) {
+		pr_err("Couldn't clear FG reset\n");
+		goto out;
+	}
+
+	/* Wait for at least a FG cycle before doing SRAM access */
+	msleep(2000);
+
+	chip->block_sram_access = false;
+
+	if (!chip->init_done) {
+		schedule_work(&chip->init_work);
+		goto wait;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Calling hw_init\n");
+
+	/*
+	 * Once FG is reset, everything in SRAM is wiped out. Redo hw_init,
+	 * update the JEITA settings, etc., to make sure all the settings
+	 * are restored.
+	 */
+	rc = fg_hw_init(chip);
+	if (rc) {
+		pr_err("Error in hw_init, rc=%d\n", rc);
+		goto out;
+	}
+
+	update_jeita_setting(&chip->update_jeita_setting.work);
+
+	if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+		rc = fg_init_iadc_config(chip);
+		if (rc)
+			goto out;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("loading battery profile\n");
+	if (!chip->use_otp_profile) {
+		chip->battery_missing = true;
+		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
+		chip->batt_type = default_batt_type;
+		fg_handle_battery_insertion(chip);
+	}
+
+wait:
+	rc = wait_for_completion_interruptible_timeout(&chip->fg_reset_done,
+			msecs_to_jiffies(FG_RESTART_TIMEOUT_MS));
+
+	/* If we were interrupted, wait one more time. */
+	if (rc == -ERESTARTSYS && !tried_again) {
+		tried_again = true;
+		pr_debug("interrupted, waiting again\n");
+		goto wait;
+	} else if (rc <= 0) {
+		pr_err("fg_restart took too long, rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_mem_write(chip, buf, fg_data[FG_DATA_VINT_ERR].address,
+			fg_data[FG_DATA_VINT_ERR].len,
+			fg_data[FG_DATA_VINT_ERR].offset, 0);
+	if (rc < 0)
+		pr_err("Error in clearing VACT_INT_ERR, rc=%d\n", rc);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("IMA error recovery done...\n");
+out:
+	fg_restore_soc(chip);
+	fg_restore_cc_soc(chip);
+	fg_enable_irqs(chip, true);
+	update_sram_data_work(&chip->update_sram_data.work);
+	update_temp_data(&chip->update_temp_work.work);
+	schedule_delayed_work(&chip->check_sanity_work,
+		msecs_to_jiffies(1000));
+	chip->ima_error_handling = false;
+	mutex_unlock(&chip->ima_recovery_lock);
+	fg_relax(&chip->fg_reset_wakeup_source);
+}
+
 #define DIG_MINOR		0x0
 #define DIG_MAJOR		0x1
 #define ANA_MINOR		0x2
 #define ANA_MAJOR		0x3
 #define IACS_INTR_SRC_SLCT	BIT(3)
-static int fg_setup_memif_offset(struct fg_chip *chip)
+static int fg_memif_init(struct fg_chip *chip)
 {
 	int rc;
 
@@ -6464,7 +8530,7 @@
 		break;
 	default:
 		pr_err("Digital Major rev=%d not supported\n",
-				chip->revision[DIG_MAJOR]);
+					chip->revision[DIG_MAJOR]);
 		return -EINVAL;
 	}
 
@@ -6481,6 +8547,13 @@
 			pr_err("failed to configure interrupt source %d\n", rc);
 			return rc;
 		}
+
+		/* check for error condition */
+		rc = fg_check_ima_exception(chip, true);
+		if (rc) {
+			pr_err("Error in clearing IMA exception rc=%d\n", rc);
+			return rc;
+		}
 	}
 
 	return 0;
@@ -6515,6 +8588,7 @@
 	case PMI8950:
 	case PMI8937:
 	case PMI8996:
+	case PMI8940:
 		chip->pmic_subtype = pmic_rev_id->pmic_subtype;
 		chip->pmic_revision[REVID_RESERVED]	= pmic_rev_id->rev1;
 		chip->pmic_revision[REVID_VARIANT]	= pmic_rev_id->rev2;
@@ -6531,10 +8605,8 @@
 }
 
 #define INIT_JEITA_DELAY_MS 1000
-
 static void delayed_init_work(struct work_struct *work)
 {
-	u8 reg[2];
 	int rc;
 	struct fg_chip *chip = container_of(work,
 				struct fg_chip,
@@ -6546,6 +8618,14 @@
 	rc = fg_hw_init(chip);
 	if (rc) {
 		pr_err("failed to hw init rc = %d\n", rc);
+		if (!chip->init_done && chip->ima_supported) {
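+			/*
+			 * First hw_init failure on an IMA-supported FG: check
+			 * the FG ALG status and return without running the
+			 * full cleanup path.
+			 */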
+			rc = fg_check_alg_status(chip);
+			if (rc && rc != -EBUSY)
+				pr_err("Couldn't check FG ALG status, rc=%d\n",
+					rc);
+			fg_mem_release(chip);
+			return;
+		}
 		fg_mem_release(chip);
 		fg_cleanup(chip);
 		return;
@@ -6566,57 +8646,19 @@
 	if (!chip->use_otp_profile)
 		schedule_delayed_work(&chip->batt_profile_init, 0);
 
+	if (chip->ima_supported && fg_reset_on_lockup)
+		schedule_delayed_work(&chip->check_sanity_work,
+			msecs_to_jiffies(1000));
+
 	if (chip->wa_flag & IADC_GAIN_COMP_WA) {
-		/* read default gain config */
-		rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
-		if (rc) {
-			pr_err("Failed to read default gain rc=%d\n", rc);
+		rc = fg_init_iadc_config(chip);
+		if (rc)
 			goto done;
-		}
-
-		if (reg[1] || reg[0]) {
-			/*
-			 * Default gain register has valid value:
-			 * - write to gain register.
-			 */
-			rc = fg_mem_write(chip, reg, GAIN_REG, 2,
-							GAIN_OFFSET, 0);
-			if (rc) {
-				pr_err("Failed to write gain rc=%d\n", rc);
-				goto done;
-			}
-		} else {
-			/*
-			 * Default gain register is invalid:
-			 * - read gain register for default gain value
-			 * - write to default gain register.
-			 */
-			rc = fg_mem_read(chip, reg, GAIN_REG, 2,
-							GAIN_OFFSET, 0);
-			if (rc) {
-				pr_err("Failed to read gain rc=%d\n", rc);
-				goto done;
-			}
-			rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
-							DEF_GAIN_OFFSET, 0);
-			if (rc) {
-				pr_err("Failed to write default gain rc=%d\n",
-									rc);
-				goto done;
-			}
-		}
-
-		chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
-		chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
-		chip->iadc_comp_data.dfl_gain = half_float(reg);
-		chip->input_present = is_input_present(chip);
-		chip->otg_present = is_otg_present(chip);
-		chip->init_done = true;
-
-		pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
-			       reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
 	}
 
+	chip->input_present = is_input_present(chip);
+	chip->otg_present = is_otg_present(chip);
+	chip->init_done = true;
 	pr_debug("FG: HW_init success\n");
 
 	return;
@@ -6675,16 +8717,30 @@
 			"qpnp_fg_cap_learning");
 	wakeup_source_init(&chip->esr_extract_wakeup_source.source,
 			"qpnp_fg_esr_extract");
+	wakeup_source_init(&chip->slope_limit_wakeup_source.source,
+			"qpnp_fg_slope_limit");
+	wakeup_source_init(&chip->dischg_gain_wakeup_source.source,
+			"qpnp_fg_dischg_gain");
+	wakeup_source_init(&chip->fg_reset_wakeup_source.source,
+			"qpnp_fg_reset");
+	wakeup_source_init(&chip->cc_soc_wakeup_source.source,
+			"qpnp_fg_cc_soc");
+	wakeup_source_init(&chip->sanity_wakeup_source.source,
+			"qpnp_fg_sanity_check");
+	spin_lock_init(&chip->sec_access_lock);
 	mutex_init(&chip->rw_lock);
 	mutex_init(&chip->cyc_ctr.lock);
 	mutex_init(&chip->learning_data.learning_lock);
 	mutex_init(&chip->rslow_comp.lock);
 	mutex_init(&chip->sysfs_restart_lock);
+	mutex_init(&chip->ima_recovery_lock);
 	INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
 	INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
 	INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
 	INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
 	INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+	INIT_DELAYED_WORK(&chip->check_sanity_work, check_sanity_work);
+	INIT_WORK(&chip->ima_error_recovery_work, ima_error_recovery_work);
 	INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
 	INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
 	INIT_WORK(&chip->dump_sram, dump_sram);
@@ -6699,13 +8755,19 @@
 	INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
 	INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
 	INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+	INIT_WORK(&chip->slope_limiter_work, slope_limiter_work);
+	INIT_WORK(&chip->dischg_gain_work, discharge_gain_work);
+	INIT_WORK(&chip->cc_soc_store_work, cc_soc_store_work);
 	alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
 			fg_cap_learning_alarm_cb);
+	alarm_init(&chip->hard_jeita_alarm, ALARM_BOOTTIME,
+			fg_hard_jeita_alarm_cb);
 	init_completion(&chip->sram_access_granted);
 	init_completion(&chip->sram_access_revoked);
 	complete_all(&chip->sram_access_revoked);
 	init_completion(&chip->batt_id_avail);
 	init_completion(&chip->first_soc_done);
+	init_completion(&chip->fg_reset_done);
 	dev_set_drvdata(&pdev->dev, chip);
 
 	if (of_get_available_child_count(pdev->dev.of_node) == 0) {
@@ -6763,7 +8825,7 @@
 		return rc;
 	}
 
-	rc = fg_setup_memif_offset(chip);
+	rc = fg_memif_init(chip);
 	if (rc) {
 		pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
 		goto of_init_fail;
@@ -6834,10 +8896,18 @@
 		rc = fg_dfs_create(chip);
 		if (rc < 0) {
 			pr_err("failed to create debugfs rc = %d\n", rc);
-			goto cancel_work;
+			goto power_supply_unregister;
 		}
 	}
 
+	/* Fake temperature till the actual temperature is read */
+	chip->last_good_temp = 250;
+
+	/* Initialize batt_info variables */
+	chip->batt_range_ocv = &fg_batt_valid_ocv;
+	chip->batt_range_pct = &fg_batt_range_pct;
+	memset(chip->batt_info, INT_MAX, sizeof(chip->batt_info));
+
 	schedule_work(&chip->init_work);
 
 	pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
@@ -6847,32 +8917,17 @@
 
 	return rc;
 
+power_supply_unregister:
+	power_supply_unregister(chip->bms_psy);
 cancel_work:
-	cancel_delayed_work_sync(&chip->update_jeita_setting);
-	cancel_delayed_work_sync(&chip->update_sram_data);
-	cancel_delayed_work_sync(&chip->update_temp_work);
-	cancel_delayed_work_sync(&chip->check_empty_work);
-	cancel_delayed_work_sync(&chip->batt_profile_init);
-	alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
-	cancel_work_sync(&chip->set_resume_soc_work);
-	cancel_work_sync(&chip->fg_cap_learning_work);
-	cancel_work_sync(&chip->dump_sram);
-	cancel_work_sync(&chip->status_change_work);
-	cancel_work_sync(&chip->cycle_count_work);
-	cancel_work_sync(&chip->update_esr_work);
-	cancel_work_sync(&chip->rslow_comp_work);
-	cancel_work_sync(&chip->sysfs_restart_work);
-	cancel_work_sync(&chip->gain_comp_work);
-	cancel_work_sync(&chip->init_work);
-	cancel_work_sync(&chip->charge_full_work);
-	cancel_work_sync(&chip->bcl_hi_power_work);
-	cancel_work_sync(&chip->esr_extract_config_work);
+	fg_cancel_all_works(chip);
 of_init_fail:
 	mutex_destroy(&chip->rslow_comp.lock);
 	mutex_destroy(&chip->rw_lock);
 	mutex_destroy(&chip->cyc_ctr.lock);
 	mutex_destroy(&chip->learning_data.learning_lock);
 	mutex_destroy(&chip->sysfs_restart_lock);
+	mutex_destroy(&chip->ima_recovery_lock);
 	wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
 	wakeup_source_trash(&chip->empty_check_wakeup_source.source);
 	wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -6882,6 +8937,11 @@
 	wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
 	wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
 	wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+	wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+	wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+	wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+	wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+	wakeup_source_trash(&chip->sanity_wakeup_source.source);
 	return rc;
 }
 
@@ -6938,11 +8998,103 @@
 	return 0;
 }
 
+static void fg_check_ima_idle(struct fg_chip *chip)
+{
+	bool rif_mem_sts = true;
+	int rc, time_count = 0;
+
+	mutex_lock(&chip->rw_lock);
+	/* Make sure IMA is idle */
+	while (1) {
+		rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+		if (rc)
+			break;
+
+		if (!rif_mem_sts)
+			break;
+
+		if (time_count > 4) {
+			pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
+			fg_run_iacs_clear_sequence(chip);
+			break;
+		}
+
+		/* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+		usleep_range(4000, 4100);
+		time_count++;
+	}
+	mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_shutdown(struct platform_device *pdev)
+{
+	struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_emerg("FG shutdown started\n");
+	fg_cancel_all_works(chip);
+	fg_check_ima_idle(chip);
+	chip->fg_shutdown = true;
+	if (fg_debug_mask & FG_STATUS)
+		pr_emerg("FG shutdown complete\n");
+}
+
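The ordering of the shutdown path above is deliberate; restating the three steps of fg_shutdown() (a summary of this hunk, with the later use of the fg_shutdown flag assumed, since it is not visible here):

	/*
	 * 1. fg_cancel_all_works()    - stop every queued/delayed work item
	 * 2. fg_check_ima_idle()      - wait (or run the IACS clear sequence)
	 *                               so no IMA/SRAM transaction is cut short
	 * 3. chip->fg_shutdown = true - flag presumably consulted elsewhere in
	 *                               the driver to refuse further requests
	 */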
 static const struct dev_pm_ops qpnp_fg_pm_ops = {
 	.suspend	= fg_suspend,
 	.resume		= fg_resume,
 };
 
+static int fg_reset_lockup_set(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+	int old_val = fg_reset_on_lockup;
+
+	rc = param_set_int(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_reset_on_lockup: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_reset_on_lockup != 0 && fg_reset_on_lockup != 1) {
+		pr_err("Bad value %d\n", fg_reset_on_lockup);
+		fg_reset_on_lockup = old_val;
+		return -EINVAL;
+	}
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return 0;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	if (!chip->ima_supported) {
+		pr_err("Cannot set this for non-IMA supported FG\n");
+		fg_reset_on_lockup = old_val;
+		return -EINVAL;
+	}
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("fg_reset_on_lockup set to %d\n", fg_reset_on_lockup);
+
+	if (fg_reset_on_lockup)
+		schedule_delayed_work(&chip->check_sanity_work,
+			msecs_to_jiffies(1000));
+	else
+		cancel_delayed_work_sync(&chip->check_sanity_work);
+
+	return rc;
+}
+
+static struct kernel_param_ops fg_reset_ops = {
+	.set = fg_reset_lockup_set,
+	.get = param_get_int,
+};
+
+module_param_cb(reset_on_lockup, &fg_reset_ops, &fg_reset_on_lockup, 0644);
+
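For reference, the reset_on_lockup hook above follows the stock module_param_cb()/kernel_param_ops pattern: param_set_int() parses and stores the value, and the custom .set callback reacts to it (here by scheduling or cancelling check_sanity_work). A minimal, hypothetical sketch of the same shape; the names are illustrative, not from this driver:

	#include <linux/moduleparam.h>

	static int my_flag;

	static int my_flag_set(const char *val, const struct kernel_param *kp)
	{
		int rc = param_set_int(val, kp);	/* parse + store */

		if (rc)
			return rc;

		/* react to the new value here, e.g. start/stop a worker */
		return 0;
	}

	static const struct kernel_param_ops my_flag_ops = {
		.set	= my_flag_set,
		.get	= param_get_int,
	};
	module_param_cb(my_flag, &my_flag_ops, &my_flag, 0644);

With 0644 permissions the knob is expected to appear under /sys/module/<module>/parameters/ (standard module-parameter behaviour, not something introduced by this patch).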
 static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
 {
 	int rc;
@@ -7025,6 +9177,7 @@
 	},
 	.probe		= fg_probe,
 	.remove		= fg_remove,
+	.shutdown	= fg_shutdown,
 };
 
 static int __init fg_init(void)
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 5a0682c..2efe746 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -25,10 +25,12 @@
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pmic-voter.h>
 #include <linux/qpnp/qpnp-adc.h>
 #include <uapi/linux/qg.h>
+#include <uapi/linux/qg-profile.h>
 #include "fg-alg.h"
 #include "qg-sdam.h"
 #include "qg-core.h"
@@ -43,6 +45,16 @@
 	debug_mask, qg_debug_mask, int, 0600
 );
 
+static int qg_esr_mod_count = 10;
+module_param_named(
+	esr_mod_count, qg_esr_mod_count, int, 0600
+);
+
+static int qg_esr_count = 5;
+module_param_named(
+	esr_count, qg_esr_count, int, 0600
+);
+
 static bool is_battery_present(struct qpnp_qg *chip)
 {
 	u8 reg = 0;
@@ -211,6 +223,14 @@
 	}
 
 	pr_debug("Notified charger on float voltage and FCC\n");
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, &prop);
+	if (rc < 0) {
+		pr_err("Failed to get charge term current, rc=%d\n", rc);
+		return;
+	}
+	chip->chg_iterm_ma = prop.intval;
 }
 
 static bool is_batt_available(struct qpnp_qg *chip)
@@ -228,7 +248,7 @@
 	return true;
 }
 
-static int qg_update_sdam_params(struct qpnp_qg *chip)
+static int qg_store_soc_params(struct qpnp_qg *chip)
 {
 	int rc, batt_temp = 0, i;
 	unsigned long rtc_sec = 0;
@@ -245,13 +265,11 @@
 	else
 		chip->sdam_data[SDAM_TEMP] = (u32)batt_temp;
 
-	rc = qg_sdam_write_all(chip->sdam_data);
-	if (rc < 0)
-		pr_err("Failed to write to SDAM rc=%d\n", rc);
-
-	for (i = 0; i < SDAM_MAX; i++)
+	for (i = 0; i <= SDAM_TIME_SEC; i++) {
+		rc |= qg_sdam_write(i, chip->sdam_data[i]);
 		qg_dbg(chip, QG_DEBUG_STATUS, "SDAM write param %d value=%d\n",
 					i, chip->sdam_data[i]);
+	}
 
 	return rc;
 }
@@ -433,6 +451,90 @@
 	return rc;
 }
 
+#define MIN_FIFO_FULL_TIME_MS			12000
+static int process_rt_fifo_data(struct qpnp_qg *chip,
+			bool update_vbat_low, bool update_smb)
+{
+	int rc = 0;
+	ktime_t now = ktime_get();
+	s64 time_delta;
+	u8 fifo_length;
+
+	/*
+	 * Reject the FIFO read event if there are back-to-back requests.
+	 * This is done to guarantee that there is always a minimum of
+	 * FIFO data to be processed; ignore this if vbat_low is set.
+	 */
+	time_delta = ktime_ms_delta(now, chip->last_user_update_time);
+
+	qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms update_vbat_low=%d update_smb=%d\n",
+				time_delta, update_vbat_low, update_smb);
+
+	if (time_delta > MIN_FIFO_FULL_TIME_MS || update_vbat_low
+						|| update_smb) {
+		rc = qg_master_hold(chip, true);
+		if (rc < 0) {
+			pr_err("Failed to hold master, rc=%d\n", rc);
+			goto done;
+		}
+
+		rc = qg_process_rt_fifo(chip);
+		if (rc < 0) {
+			pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
+			goto done;
+		}
+
+		if (update_vbat_low) {
+			/* change FIFO length */
+			fifo_length = chip->vbat_low ?
+					chip->dt.s2_vbat_low_fifo_length :
+					chip->dt.s2_fifo_length;
+			rc = qg_update_fifo_length(chip, fifo_length);
+			if (rc < 0)
+				goto done;
+
+			qg_dbg(chip, QG_DEBUG_STATUS,
+				"FIFO length updated to %d vbat_low=%d\n",
+					fifo_length, chip->vbat_low);
+		}
+
+		if (update_smb) {
+			rc = qg_masked_write(chip, chip->qg_base +
+				QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT,
+				chip->parallel_enabled ?
+					PARALLEL_IBAT_SENSE_EN_BIT : 0);
+			if (rc < 0) {
+				pr_err("Failed to update SMB_EN, rc=%d\n", rc);
+				goto done;
+			}
+			qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n",
+						chip->parallel_enabled);
+		}
+
+		rc = qg_master_hold(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to release master, rc=%d\n", rc);
+			goto done;
+		}
+		/* FIFOs restarted */
+		chip->last_fifo_update_time = ktime_get();
+
+		/* signal the read thread */
+		chip->data_ready = true;
+		wake_up_interruptible(&chip->qg_wait_q);
+		chip->last_user_update_time = now;
+
+		/* vote to stay awake until userspace reads data */
+		vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0);
+	} else {
+		qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n",
+							time_delta);
+	}
+done:
+	qg_master_hold(chip, false);
+	return rc;
+}
+
 #define VBAT_LOW_HYST_UV		50000 /* 50mV */
 static int qg_vbat_low_wa(struct qpnp_qg *chip)
 {
@@ -561,82 +663,356 @@
 	return rc;
 }
 
-#define MIN_FIFO_FULL_TIME_MS			12000
-static int process_rt_fifo_data(struct qpnp_qg *chip,
-				bool vbat_low, bool update_smb)
+static void qg_retrieve_esr_params(struct qpnp_qg *chip)
 {
-	int rc = 0;
-	ktime_t now = ktime_get();
-	s64 time_delta;
+	u32 data = 0;
+	int rc;
+
+	rc = qg_sdam_read(SDAM_ESR_CHARGE_DELTA, &data);
+	if (!rc && data) {
+		chip->kdata.param[QG_ESR_CHARGE_DELTA].data = data;
+		chip->kdata.param[QG_ESR_CHARGE_DELTA].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_CHARGE_DELTA SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_CHARGE_DELTA rc=%d\n", rc);
+	}
+
+	rc = qg_sdam_read(SDAM_ESR_DISCHARGE_DELTA, &data);
+	if (!rc && data) {
+		chip->kdata.param[QG_ESR_DISCHARGE_DELTA].data = data;
+		chip->kdata.param[QG_ESR_DISCHARGE_DELTA].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_DISCHARGE_DELTA SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_DISCHARGE_DELTA rc=%d\n", rc);
+	}
+
+	rc = qg_sdam_read(SDAM_ESR_CHARGE_SF, &data);
+	if (!rc && data) {
+		data = CAP(QG_ESR_SF_MIN, QG_ESR_SF_MAX, data);
+		chip->kdata.param[QG_ESR_CHARGE_SF].data = data;
+		chip->kdata.param[QG_ESR_CHARGE_SF].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_CHARGE_SF SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_CHARGE_SF rc=%d\n", rc);
+	}
+
+	rc = qg_sdam_read(SDAM_ESR_DISCHARGE_SF, &data);
+	if (!rc && data) {
+		data = CAP(QG_ESR_SF_MIN, QG_ESR_SF_MAX, data);
+		chip->kdata.param[QG_ESR_DISCHARGE_SF].data = data;
+		chip->kdata.param[QG_ESR_DISCHARGE_SF].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR_DISCHARGE_SF SDAM=%d\n", data);
+	} else if (rc < 0) {
+		pr_err("Failed to read ESR_DISCHARGE_SF rc=%d\n", rc);
+	}
+}
+
+static void qg_store_esr_params(struct qpnp_qg *chip)
+{
+	unsigned int esr;
+
+	if (chip->udata.param[QG_ESR_CHARGE_DELTA].valid) {
+		esr = chip->udata.param[QG_ESR_CHARGE_DELTA].data;
+		qg_sdam_write(SDAM_ESR_CHARGE_DELTA, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_CHARGE_DELTA=%d\n", esr);
+	}
+
+	if (chip->udata.param[QG_ESR_DISCHARGE_DELTA].valid) {
+		esr = chip->udata.param[QG_ESR_DISCHARGE_DELTA].data;
+		qg_sdam_write(SDAM_ESR_DISCHARGE_DELTA, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_DISCHARGE_DELTA=%d\n", esr);
+	}
+
+	if (chip->udata.param[QG_ESR_CHARGE_SF].valid) {
+		esr = chip->udata.param[QG_ESR_CHARGE_SF].data;
+		qg_sdam_write(SDAM_ESR_CHARGE_SF, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_CHARGE_SF=%d\n", esr);
+	}
+
+	if (chip->udata.param[QG_ESR_DISCHARGE_SF].valid) {
+		esr = chip->udata.param[QG_ESR_DISCHARGE_SF].data;
+		qg_sdam_write(SDAM_ESR_DISCHARGE_SF, esr);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"SDAM store ESR_DISCHARGE_SF=%d\n", esr);
+	}
+}
+
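Taken together, qg_retrieve_esr_params() and qg_store_esr_params() round-trip the learned ESR deltas and scale factors through SDAM so they survive a reset. The wiring, as visible elsewhere in this patch (a summary, not new behaviour):

	/*
	 * boot:    qg_post_init()    -> qg_retrieve_esr_params()
	 * runtime: qg_esr_estimate() -> qg_retrieve_esr_params()
	 *          udata processing  -> qg_store_esr_params()
	 * all three paths are skipped when qcom,esr-disable is set.
	 */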
+#define MAX_ESR_RETRY_COUNT		10
+#define ESR_SD_PERCENT			10
+static int qg_process_esr_data(struct qpnp_qg *chip)
+{
+	int i;
+	int pre_i, post_i, pre_v, post_v, first_pre_i = 0;
+	int diff_v, diff_i, esr_avg = 0, count = 0;
+
+	for (i = 0; i < qg_esr_count; i++) {
+		if (!chip->esr_data[i].valid)
+			continue;
+
+		pre_i = chip->esr_data[i].pre_esr_i;
+		pre_v = chip->esr_data[i].pre_esr_v;
+		post_i = chip->esr_data[i].post_esr_i;
+		post_v = chip->esr_data[i].post_esr_v;
+
+		/*
+		 * Check if any of the pre/post current readings have
+		 * changed sign by comparing them with the first valid
+		 * pre_i value.
+		 */
+		if (!first_pre_i)
+			first_pre_i = pre_i;
+
+		if ((first_pre_i < 0 && pre_i > 0) ||
+			(first_pre_i > 0 && post_i < 0) ||
+			(first_pre_i < 0 && post_i > 0)) {
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR-sign mismatch %d reject all data\n", i);
+			esr_avg = count = 0;
+			break;
+		}
+
+		/* calculate ESR */
+		diff_v = abs(post_v - pre_v);
+		diff_i = abs(post_i - pre_i);
+
+		if (!diff_v || !diff_i ||
+			(diff_i < chip->dt.esr_qual_i_ua) ||
+			(diff_v < chip->dt.esr_qual_v_uv)) {
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR (%d) I/V %duA %duV fails qualification\n",
+				i, diff_i, diff_v);
+			chip->esr_data[i].valid = false;
+			continue;
+		}
+
+		chip->esr_data[i].esr =
+			DIV_ROUND_CLOSEST(diff_v * 1000, diff_i);
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"ESR qualified: i=%d pre_i=%d pre_v=%d post_i=%d post_v=%d esr_diff_v=%d esr_diff_i=%d esr=%d\n",
+			i, pre_i, pre_v, post_i, post_v,
+			diff_v, diff_i, chip->esr_data[i].esr);
+
+		esr_avg += chip->esr_data[i].esr;
+		count++;
+	}
+
+	if (!count) {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"No ESR samples qualified, ESR not found\n");
+		chip->esr_avg = 0;
+		return 0;
+	}
+
+	esr_avg /= count;
+	qg_dbg(chip, QG_DEBUG_ESR,
+		"ESR all sample average=%d count=%d apply_SD=%d\n",
+		esr_avg, count, (esr_avg * ESR_SD_PERCENT) / 100);
 
 	/*
-	 * Reject the FIFO read event if there are back-to-back requests
-	 * This is done to gaurantee that there is always a minimum FIFO
-	 * data to be processed, ignore this if vbat_low is set.
+	 * Reject ESR samples which deviate from the average by more
+	 * than ESR_SD_PERCENT (10%)
 	 */
-	time_delta = ktime_ms_delta(now, chip->last_user_update_time);
+	count = 0;
+	for (i = 0; i < qg_esr_count; i++) {
+		if (!chip->esr_data[i].valid)
+			continue;
 
-	qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms vbat_low=%d\n",
-				time_delta, vbat_low);
+		if ((abs(chip->esr_data[i].esr - esr_avg) <=
+			(esr_avg * ESR_SD_PERCENT) / 100)) {
+			/* valid ESR */
+			chip->esr_avg += chip->esr_data[i].esr;
+			count++;
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"Valid ESR after SD (%d) %d mOhm\n",
+				i, chip->esr_data[i].esr);
+		} else {
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR (%d) %d falls-out of SD(%d)\n",
+				i, chip->esr_data[i].esr, ESR_SD_PERCENT);
+		}
+	}
 
-	if (time_delta > MIN_FIFO_FULL_TIME_MS || vbat_low || update_smb) {
-		rc = qg_master_hold(chip, true);
+	if (count >= QG_MIN_ESR_COUNT) {
+		chip->esr_avg /= count;
+		qg_dbg(chip, QG_DEBUG_ESR, "Average estimated ESR %d mOhm\n",
+					chip->esr_avg);
+	} else {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"Not enough ESR samples, ESR not found\n");
+		chip->esr_avg = 0;
+	}
+
+	return 0;
+}
+
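A quick unit check for the ESR computation above (diff_v in uV, diff_i in uA, so DIV_ROUND_CLOSEST(diff_v * 1000, diff_i) yields mOhm), using illustrative readings only:

	/*
	 * diff_v = |post_v - pre_v| = 15000 uV
	 * diff_i = |post_i - pre_i| = 150000 uA
	 * esr    = DIV_ROUND_CLOSEST(15000 * 1000, 150000) = 100 mOhm
	 */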
+static int qg_esr_estimate(struct qpnp_qg *chip)
+{
+	int rc, i, ibat;
+	u8 esr_done_count, reg0 = 0, reg1 = 0;
+	bool is_charging = false;
+
+	if (chip->dt.esr_disable)
+		return 0;
+
+	/*
+	 * Charge - enable ESR estimation only during fast-charging.
+	 * Discharge - enable ESR estimation only if enabled via DT.
+	 */
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+			chip->charge_type != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"Skip ESR, Not in fast-charge (CC)\n");
+		return 0;
+	}
+
+	if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+			!chip->dt.esr_discharge_enable)
+		return 0;
+
+	if (chip->batt_soc != INT_MIN && (chip->batt_soc <
+					chip->dt.esr_disable_soc)) {
+		qg_dbg(chip, QG_DEBUG_ESR,
+			"Skip ESR, batt-soc below %d\n",
+				chip->dt.esr_disable_soc);
+		return 0;
+	}
+
+	qg_dbg(chip, QG_DEBUG_ESR, "FIFO done count=%d ESR mod count=%d\n",
+			chip->fifo_done_count, qg_esr_mod_count);
+
+	if ((chip->fifo_done_count % qg_esr_mod_count) != 0)
+		return 0;
+
+	if (qg_esr_count > QG_MAX_ESR_COUNT)
+		qg_esr_count = QG_MAX_ESR_COUNT;
+
+	if (qg_esr_count < QG_MIN_ESR_COUNT)
+		qg_esr_count = QG_MIN_ESR_COUNT;
+
+	/* clear all data */
+	chip->esr_avg = 0;
+	memset(&chip->esr_data, 0, sizeof(chip->esr_data));
+
+	rc = qg_master_hold(chip, true);
+	if (rc < 0) {
+		pr_err("Failed to hold master, rc=%d\n", rc);
+		goto done;
+	}
+
+	for (i = 0; i < qg_esr_count; i++) {
+		/* Fire ESR measurement */
+		rc = qg_masked_write(chip,
+			chip->qg_base + QG_ESR_MEAS_TRIG_REG,
+			HW_ESR_MEAS_START_BIT, HW_ESR_MEAS_START_BIT);
 		if (rc < 0) {
-			pr_err("Failed to hold master, rc=%d\n", rc);
-			goto done;
+			pr_err("Failed to start ESR rc=%d\n", rc);
+			continue;
 		}
 
-		rc = qg_process_rt_fifo(chip);
-		if (rc < 0) {
-			pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
-			goto done;
-		}
+		esr_done_count = reg0 = reg1 = 0;
+		do {
+			/* delay for ESR processing to complete */
+			msleep(50);
 
-		if (vbat_low) {
-			/* change FIFO length */
-			rc = qg_update_fifo_length(chip,
-					chip->dt.s2_vbat_low_fifo_length);
+			esr_done_count++;
+
+			rc = qg_read(chip,
+				chip->qg_base + QG_STATUS1_REG, &reg0, 1);
+			if (rc < 0)
+				continue;
+
+			rc = qg_read(chip,
+				chip->qg_base + QG_STATUS4_REG, &reg1, 1);
+			if (rc < 0)
+				continue;
+
+			/* check ESR-done status */
+			if (!(reg1 & ESR_MEAS_IN_PROGRESS_BIT) &&
+					(reg0 & ESR_MEAS_DONE_BIT)) {
+				qg_dbg(chip, QG_DEBUG_ESR,
+					"ESR measurement done %d count %d\n",
+						i, esr_done_count);
+				break;
+			}
+		} while (esr_done_count < MAX_ESR_RETRY_COUNT);
+
+		if (esr_done_count == MAX_ESR_RETRY_COUNT) {
+			pr_err("Failed to get ESR done for %d iteration\n", i);
+			continue;
+		} else {
+			/* found a valid ESR, read pre-post data */
+			rc = qg_read_raw_data(chip, QG_PRE_ESR_V_DATA0_REG,
+					&chip->esr_data[i].pre_esr_v);
 			if (rc < 0)
 				goto done;
 
-			qg_dbg(chip, QG_DEBUG_STATUS,
-				"FIFO length updated to %d vbat_low=%d\n",
-					chip->dt.s2_vbat_low_fifo_length,
-					vbat_low);
-		}
-
-		if (update_smb) {
-			rc = qg_masked_write(chip, chip->qg_base +
-				QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT,
-				chip->parallel_enabled ?
-					PARALLEL_IBAT_SENSE_EN_BIT : 0);
-			if (rc < 0) {
-				pr_err("Failed to update SMB_EN, rc=%d\n", rc);
+			rc = qg_read_raw_data(chip, QG_PRE_ESR_I_DATA0_REG,
+					&chip->esr_data[i].pre_esr_i);
+			if (rc < 0)
 				goto done;
-			}
-			qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n",
-						chip->parallel_enabled);
+
+			rc = qg_read_raw_data(chip, QG_POST_ESR_V_DATA0_REG,
+					&chip->esr_data[i].post_esr_v);
+			if (rc < 0)
+				goto done;
+
+			rc = qg_read_raw_data(chip, QG_POST_ESR_I_DATA0_REG,
+					&chip->esr_data[i].post_esr_i);
+			if (rc < 0)
+				goto done;
+
+			chip->esr_data[i].pre_esr_v =
+				V_RAW_TO_UV(chip->esr_data[i].pre_esr_v);
+			ibat = sign_extend32(chip->esr_data[i].pre_esr_i, 15);
+			chip->esr_data[i].pre_esr_i = I_RAW_TO_UA(ibat);
+			chip->esr_data[i].post_esr_v =
+				V_RAW_TO_UV(chip->esr_data[i].post_esr_v);
+			ibat = sign_extend32(chip->esr_data[i].post_esr_i, 15);
+			chip->esr_data[i].post_esr_i = I_RAW_TO_UA(ibat);
+
+			chip->esr_data[i].valid = true;
+
+			if ((int)chip->esr_data[i].pre_esr_i < 0)
+				is_charging = true;
+
+			qg_dbg(chip, QG_DEBUG_ESR,
+				"ESR values for %d iteration pre_v=%d pre_i=%d post_v=%d post_i=%d\n",
+				i, chip->esr_data[i].pre_esr_v,
+				(int)chip->esr_data[i].pre_esr_i,
+				chip->esr_data[i].post_esr_v,
+				(int)chip->esr_data[i].post_esr_i);
 		}
-
-		rc = qg_master_hold(chip, false);
-		if (rc < 0) {
-			pr_err("Failed to release master, rc=%d\n", rc);
-			goto done;
-		}
-		/* FIFOs restarted */
-		chip->last_fifo_update_time = ktime_get();
-
-		/* signal the read thread */
-		chip->data_ready = true;
-		wake_up_interruptible(&chip->qg_wait_q);
-		chip->last_user_update_time = now;
-
-		/* vote to stay awake until userspace reads data */
-		vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0);
-	} else {
-		qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n",
-							time_delta);
+		/* delay before the next ESR measurement */
+		msleep(200);
 	}
+
+	rc = qg_process_esr_data(chip);
+	if (rc < 0)
+		pr_err("Failed to process ESR data rc=%d\n", rc);
+
+	rc = qg_master_hold(chip, false);
+	if (rc < 0) {
+		pr_err("Failed to release master, rc=%d\n", rc);
+		goto done;
+	}
+
+	if (chip->esr_avg) {
+		chip->kdata.param[QG_ESR].data = chip->esr_avg;
+		chip->kdata.param[QG_ESR].valid = true;
+		qg_dbg(chip, QG_DEBUG_ESR, "ESR_SW=%d during %s\n",
+			chip->esr_avg, is_charging ? "CHARGE" : "DISCHARGE");
+		qg_retrieve_esr_params(chip);
+		chip->esr_actual = chip->esr_avg;
+	}
+
+	return 0;
 done:
 	qg_master_hold(chip, false);
 	return rc;
@@ -654,6 +1030,9 @@
 	if (chip->udata.param[QG_BATT_SOC].valid)
 		chip->batt_soc = chip->udata.param[QG_BATT_SOC].data;
 
+	if (chip->udata.param[QG_FULL_SOC].valid)
+		chip->full_soc = chip->udata.param[QG_FULL_SOC].data;
+
 	if (chip->udata.param[QG_SOC].valid) {
 		qg_dbg(chip, QG_DEBUG_SOC, "udata SOC=%d last SOC=%d\n",
 			chip->udata.param[QG_SOC].data, chip->catch_up_soc);
@@ -669,7 +1048,7 @@
 				chip->udata.param[QG_RBAT_MOHM].data;
 		chip->sdam_data[SDAM_VALID] = 1;
 
-		rc = qg_update_sdam_params(chip);
+		rc = qg_store_soc_params(chip);
 		if (rc < 0)
 			pr_err("Failed to update SDAM params, rc=%d\n", rc);
 	}
@@ -678,18 +1057,25 @@
 		chip->charge_counter_uah =
 			chip->udata.param[QG_CHARGE_COUNTER].data;
 
+	if (chip->udata.param[QG_ESR].valid)
+		chip->esr_last = chip->udata.param[QG_ESR].data;
+
+	if (chip->esr_actual != -EINVAL && chip->udata.param[QG_ESR].valid) {
+		chip->esr_nominal = chip->udata.param[QG_ESR].data;
+		if (chip->qg_psy)
+			power_supply_changed(chip->qg_psy);
+	}
+
+	if (!chip->dt.esr_disable)
+		qg_store_esr_params(chip);
+
+	qg_dbg(chip, QG_DEBUG_STATUS, "udata update: batt_soc=%d cc_soc=%d full_soc=%d qg_esr=%d\n",
+		(chip->batt_soc != INT_MIN) ? chip->batt_soc : -EINVAL,
+		(chip->cc_soc != INT_MIN) ? chip->cc_soc : -EINVAL,
+		chip->full_soc, chip->esr_last);
 	vote(chip->awake_votable, UDATA_READY_VOTER, false, 0);
 }
 
-static irqreturn_t qg_default_irq_handler(int irq, void *data)
-{
-	struct qpnp_qg *chip = data;
-
-	qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n");
-
-	return IRQ_HANDLED;
-}
-
 #define MAX_FIFO_DELTA_PERCENT		10
 static irqreturn_t qg_fifo_update_done_handler(int irq, void *data)
 {
@@ -717,6 +1103,9 @@
 		goto done;
 	}
 
+	if (++chip->fifo_done_count == U32_MAX)
+		chip->fifo_done_count = 0;
+
 	rc = qg_vbat_thresholds_config(chip);
 	if (rc < 0)
 		pr_err("Failed to apply VBAT EMPTY config rc=%d\n", rc);
@@ -727,6 +1116,12 @@
 		goto done;
 	}
 
+	rc = qg_esr_estimate(chip);
+	if (rc < 0) {
+		pr_err("Failed to estimate ESR, rc=%d\n", rc);
+		goto done;
+	}
+
 	rc = get_fifo_done_time(chip, false, &hw_delta_ms);
 	if (rc < 0)
 		hw_delta_ms = 0;
@@ -766,9 +1161,14 @@
 		pr_err("Failed to read RT status, rc=%d\n", rc);
 		goto done;
 	}
+	/* ignore VBAT low if battery is missing */
+	if ((status & BATTERY_MISSING_INT_RT_STS_BIT) ||
+			chip->battery_missing)
+		goto done;
+
 	chip->vbat_low = !!(status & VBAT_LOW_INT_RT_STS_BIT);
 
-	rc = process_rt_fifo_data(chip, chip->vbat_low, false);
+	rc = process_rt_fifo_data(chip, true, false);
 	if (rc < 0)
 		pr_err("Failed to process RT FIFO data, rc=%d\n", rc);
 
@@ -782,8 +1182,20 @@
 {
 	struct qpnp_qg *chip = data;
 	u32 ocv_uv = 0;
+	int rc;
+	u8 status = 0;
 
 	qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n");
+
+	rc = qg_read(chip, chip->qg_base + QG_INT_RT_STS_REG, &status, 1);
+	if (rc < 0)
+		pr_err("Failed to read RT status rc=%d\n", rc);
+
+	/* ignore VBAT empty if battery is missing */
+	if ((status & BATTERY_MISSING_INT_RT_STS_BIT) ||
+			chip->battery_missing)
+		return IRQ_HANDLED;
+
 	pr_warn("VBATT EMPTY SOC = 0\n");
 
 	chip->catch_up_soc = 0;
@@ -794,7 +1206,7 @@
 	chip->sdam_data[SDAM_OCV_UV] = ocv_uv;
 	chip->sdam_data[SDAM_VALID] = 1;
 
-	qg_update_sdam_params(chip);
+	qg_store_soc_params(chip);
 
 	if (chip->qg_psy)
 		power_supply_changed(chip->qg_psy);
@@ -844,7 +1256,6 @@
 static struct qg_irq_info qg_irqs[] = {
 	[QG_BATT_MISSING_IRQ] = {
 		.name		= "qg-batt-missing",
-		.handler	= qg_default_irq_handler,
 	},
 	[QG_VBATT_LOW_IRQ] = {
 		.name		= "qg-vbat-low",
@@ -868,11 +1279,9 @@
 	},
 	[QG_FSM_STAT_CHG_IRQ] = {
 		.name		= "qg-fsm-state-chg",
-		.handler	= qg_default_irq_handler,
 	},
 	[QG_EVENT_IRQ] = {
 		.name		= "qg-event",
-		.handler	= qg_default_irq_handler,
 	},
 };
 
@@ -971,7 +1380,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !chip->profile_loaded)
-		return -EPERM;
+		return -ENODEV;
 
 	rc = qg_sdam_multibyte_read(QG_SDAM_LEARNED_CAPACITY_OFFSET,
 					(u8 *)&cc_mah, 2);
@@ -996,7 +1405,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !learned_cap_uah)
-		return -EPERM;
+		return -ENODEV;
 
 	cc_mah = div64_s64(learned_cap_uah, 1000);
 	rc = qg_sdam_multibyte_write(QG_SDAM_LEARNED_CAPACITY_OFFSET,
@@ -1036,7 +1445,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !chip->profile_loaded)
-		return -EPERM;
+		return -ENODEV;
 
 	if (!buf || length > BUCKET_COUNT)
 		return -EINVAL;
@@ -1064,7 +1473,7 @@
 		return -ENODEV;
 
 	if (chip->battery_missing || !chip->profile_loaded)
-		return -EPERM;
+		return -ENODEV;
 
 	if (!buf || length > BUCKET_COUNT * 2 || id < 0 ||
 		id > BUCKET_COUNT - 1 ||
@@ -1195,6 +1604,87 @@
 	return 0;
 }
 
+static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
+{
+	union power_supply_propval prop = {0, };
+	struct qpnp_qg *chip = data;
+	int rc = 0;
+	int64_t temp = 0;
+
+	if (!chip)
+		return -ENODEV;
+
+	switch (param) {
+	case TTF_VALID:
+		*val = (!chip->battery_missing && chip->profile_loaded);
+		break;
+	case TTF_MSOC:
+		rc = qg_get_battery_capacity(chip, val);
+		break;
+	case TTF_VBAT:
+		rc = qg_get_battery_voltage(chip, val);
+		break;
+	case TTF_IBAT:
+		rc = qg_get_battery_current(chip, val);
+		break;
+	case TTF_FCC:
+		if (chip->qg_psy) {
+			rc = power_supply_get_property(chip->qg_psy,
+				POWER_SUPPLY_PROP_CHARGE_FULL, &prop);
+			if (rc >= 0) {
+				temp = div64_u64(prop.intval, 1000);
+				*val  = div64_u64(chip->full_soc * temp,
+						QG_SOC_FULL);
+			}
+		}
+		break;
+	case TTF_MODE:
+		*val = TTF_MODE_NORMAL;
+		break;
+	case TTF_ITERM:
+		if (chip->chg_iterm_ma == INT_MIN)
+			*val = 0;
+		else
+			*val = chip->chg_iterm_ma;
+		break;
+	case TTF_RBATT:
+		rc = qg_sdam_read(SDAM_RBAT_MOHM, val);
+		if (!rc)
+			*val *= 1000;
+		break;
+	case TTF_VFLOAT:
+		*val = chip->bp.float_volt_uv;
+		break;
+	case TTF_CHG_TYPE:
+		*val = chip->charge_type;
+		break;
+	case TTF_CHG_STATUS:
+		*val = chip->charge_status;
+		break;
+	default:
+		pr_err("Unsupported property %d\n", param);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
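To make the TTF_FCC scaling above concrete (numbers are illustrative; QG_SOC_FULL is taken to be the driver's fixed-point value for 100%, which is also what full_soc is initialised to later in this patch):

	/*
	 * CHARGE_FULL = 3000000 uAh, full_soc = QG_SOC_FULL
	 *   temp = 3000000 / 1000                  = 3000
	 *   val  = (full_soc * 3000) / QG_SOC_FULL = 3000 mAh
	 * i.e. the FCC handed to the TTF algorithm is CHARGE_FULL scaled
	 * by how much of a "full" battery is currently reachable.
	 */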
+static int qg_ttf_awake_voter(void *data, bool val)
+{
+	struct qpnp_qg *chip = data;
+
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->battery_missing || !chip->profile_loaded)
+		return -ENODEV;
+
+	vote(chip->awake_votable, TTF_AWAKE_VOTER, val, 0);
+
+	return 0;
+}
+
 static int qg_psy_set_property(struct power_supply *psy,
 			       enum power_supply_property psp,
 			       const union power_supply_propval *pval)
@@ -1222,6 +1712,17 @@
 			chip->cl->learned_cap_uah = pval->intval;
 		mutex_unlock(&chip->cl->lock);
 		break;
+	case POWER_SUPPLY_PROP_SOH:
+		chip->soh = pval->intval;
+		qg_dbg(chip, QG_DEBUG_STATUS, "SOH update: SOH=%d esr_actual=%d esr_nominal=%d\n",
+				chip->soh, chip->esr_actual, chip->esr_nominal);
+		break;
+	case POWER_SUPPLY_PROP_ESR_ACTUAL:
+		chip->esr_actual = pval->intval;
+		break;
+	case POWER_SUPPLY_PROP_ESR_NOMINAL:
+		chip->esr_nominal = pval->intval;
+		break;
 	default:
 		break;
 	}
@@ -1265,6 +1766,12 @@
 		if (!rc)
 			pval->intval *= 1000;
 		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_NOW:
+		pval->intval = chip->esr_last;
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		pval->intval = chip->soc_reporting_ready;
+		break;
 	case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE:
 		pval->intval = chip->dt.rbat_conn_mohm;
 		break;
@@ -1307,6 +1814,23 @@
 	case POWER_SUPPLY_PROP_CYCLE_COUNT:
 		rc = get_cycle_count(chip->counter, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+		rc = ttf_get_time_to_full(chip->ttf, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+		rc = ttf_get_time_to_empty(chip->ttf, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_ESR_ACTUAL:
+		pval->intval = (chip->esr_actual == -EINVAL) ?  -EINVAL :
+					(chip->esr_actual * 1000);
+		break;
+	case POWER_SUPPLY_PROP_ESR_NOMINAL:
+		pval->intval = (chip->esr_nominal == -EINVAL) ?  -EINVAL :
+					(chip->esr_nominal * 1000);
+		break;
+	case POWER_SUPPLY_PROP_SOH:
+		pval->intval = chip->soh;
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -1320,6 +1844,9 @@
 {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_CHARGE_FULL:
+	case POWER_SUPPLY_PROP_ESR_ACTUAL:
+	case POWER_SUPPLY_PROP_ESR_NOMINAL:
+	case POWER_SUPPLY_PROP_SOH:
 		return 1;
 	default:
 		break;
@@ -1336,6 +1863,8 @@
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_RESISTANCE,
 	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_RESISTANCE_NOW,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
 	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
 	POWER_SUPPLY_PROP_DEBUG_BATTERY,
 	POWER_SUPPLY_PROP_BATTERY_TYPE,
@@ -1347,6 +1876,11 @@
 	POWER_SUPPLY_PROP_CYCLE_COUNTS,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+	POWER_SUPPLY_PROP_ESR_ACTUAL,
+	POWER_SUPPLY_PROP_ESR_NOMINAL,
+	POWER_SUPPLY_PROP_SOH,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
@@ -1433,6 +1967,7 @@
 {
 	int rc;
 	bool parallel_enabled = is_parallel_enabled(chip);
+	bool update_smb = false;
 
 	if (parallel_enabled == chip->parallel_enabled)
 		return 0;
@@ -1443,7 +1978,14 @@
 
 	mutex_lock(&chip->data_lock);
 
-	rc = process_rt_fifo_data(chip, false, true);
+	/*
+	 * Parallel charger uses the same external sense, hence do not
+	 * enable SMB sensing if PMI632 is configured for external sense.
+	 */
+	if (!chip->dt.qg_ext_sense)
+		update_smb = true;
+
+	rc = process_rt_fifo_data(chip, false, update_smb);
 	if (rc < 0)
 		pr_err("Failed to process RT FIFO data, rc=%d\n", rc);
 
@@ -1468,6 +2010,110 @@
 	return 0;
 }
 
+static int qg_handle_battery_removal(struct qpnp_qg *chip)
+{
+	int rc, length = QG_SDAM_MAX_OFFSET - QG_SDAM_VALID_OFFSET;
+	u8 *data;
+
+	/* clear SDAM */
+	data = kcalloc(length, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	rc = qg_sdam_multibyte_write(QG_SDAM_VALID_OFFSET, data, length);
+	if (rc < 0)
+		pr_err("Failed to clear SDAM rc=%d\n", rc);
+
+	return rc;
+}
+
+#define MAX_QG_OK_RETRIES	20
+static int qg_handle_battery_insertion(struct qpnp_qg *chip)
+{
+	int rc, count = 0;
+	u32 ocv_uv = 0, ocv_raw = 0;
+	u8 reg = 0;
+
+	do {
+		rc = qg_read(chip, chip->qg_base + QG_STATUS1_REG, &reg, 1);
+		if (rc < 0) {
+			pr_err("Failed to read STATUS1_REG rc=%d\n", rc);
+			return rc;
+		}
+
+		if (reg & QG_OK_BIT)
+			break;
+
+		msleep(200);
+		count++;
+	} while (count < MAX_QG_OK_RETRIES);
+
+	if (count == MAX_QG_OK_RETRIES) {
+		qg_dbg(chip, QG_DEBUG_STATUS, "QG_OK not set!\n");
+		return 0;
+	}
+
+	/* read S7 PON OCV */
+	rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S7_PON_OCV);
+	if (rc < 0) {
+		pr_err("Failed to read PON OCV rc=%d\n", rc);
+		return rc;
+	}
+
+	qg_dbg(chip, QG_DEBUG_STATUS,
+		"S7_OCV on battery insertion = %duV\n", ocv_uv);
+
+	chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv;
+	chip->kdata.param[QG_GOOD_OCV_UV].valid = true;
+	/* clear all the userspace data */
+	chip->kdata.param[QG_CLEAR_LEARNT_DATA].data = 1;
+	chip->kdata.param[QG_CLEAR_LEARNT_DATA].valid = true;
+
+	vote(chip->awake_votable, GOOD_OCV_VOTER, true, 0);
+	/* signal the read thread */
+	chip->data_ready = true;
+	wake_up_interruptible(&chip->qg_wait_q);
+
+	return 0;
+}
+
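A note on the worst-case latency of the insertion handler above, from the constants in this hunk:

	/* MAX_QG_OK_RETRIES (20) * msleep(200) = ~4 s before giving up on QG_OK */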
+static int qg_battery_status_update(struct qpnp_qg *chip)
+{
+	int rc;
+	union power_supply_propval prop = {0, };
+
+	if (!is_batt_available(chip))
+		return 0;
+
+	mutex_lock(&chip->data_lock);
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_PRESENT, &prop);
+	if (rc < 0) {
+		pr_err("Failed to get battery-present, rc=%d\n", rc);
+		goto done;
+	}
+
+	if (chip->battery_missing && prop.intval) {
+		pr_warn("Battery inserted!\n");
+		rc = qg_handle_battery_insertion(chip);
+		if (rc < 0)
+			pr_err("Failed in battery-insertion rc=%d\n", rc);
+	} else if (!chip->battery_missing && !prop.intval) {
+		pr_warn("Battery removed!\n");
+		rc = qg_handle_battery_removal(chip);
+		if (rc < 0)
+			pr_err("Failed in battery-removal rc=%d\n", rc);
+	}
+
+	chip->battery_missing = !prop.intval;
+
+done:
+	mutex_unlock(&chip->data_lock);
+	return rc;
+}
+
 static void qg_status_change_work(struct work_struct *work)
 {
 	struct qpnp_qg *chip = container_of(work,
@@ -1480,6 +2126,17 @@
 		goto out;
 	}
 
+	rc = qg_battery_status_update(chip);
+	if (rc < 0)
+		pr_err("Failed to process battery status update rc=%d\n", rc);
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+	if (rc < 0)
+		pr_err("Failed to get charge-type, rc=%d\n", rc);
+	else
+		chip->charge_type = prop.intval;
+
 	rc = power_supply_get_property(chip->batt_psy,
 			POWER_SUPPLY_PROP_STATUS, &prop);
 	if (rc < 0)
@@ -1526,6 +2183,8 @@
 	rc = qg_charge_full_update(chip);
 	if (rc < 0)
 		pr_err("Failed in charge_full_update, rc=%d\n", rc);
+
+	ttf_update(chip->ttf, chip->usb_present);
 out:
 	pm_relax(chip->dev);
 }
@@ -1867,6 +2526,7 @@
 		qg_dbg(chip, QG_DEBUG_PROFILE, "Battery Missing!\n");
 		chip->battery_missing = true;
 		chip->profile_loaded = false;
+		chip->soc_reporting_ready = true;
 	} else {
 		/* battery present */
 		rc = get_batt_id_ohm(chip, &chip->batt_id_ohm);
@@ -1875,11 +2535,14 @@
 			chip->profile_loaded = false;
 		} else {
 			rc = qg_load_battery_profile(chip);
-			if (rc < 0)
+			if (rc < 0) {
 				pr_err("Failed to load battery-profile rc=%d\n",
 								rc);
-			else
+				chip->profile_loaded = false;
+				chip->soc_reporting_ready = true;
+			} else {
 				chip->profile_loaded = true;
+			}
 		}
 	}
 
@@ -1890,12 +2553,21 @@
 	return 0;
 }
 
+static struct ocv_all ocv[] = {
+	[S7_PON_OCV] = { 0, 0, "S7_PON_OCV"},
+	[S3_GOOD_OCV] = { 0, 0, "S3_GOOD_OCV"},
+	[S3_LAST_OCV] = { 0, 0, "S3_LAST_OCV"},
+	[SDAM_PON_OCV] = { 0, 0, "SDAM_PON_OCV"},
+};
+
+#define S7_ERROR_MARGIN_UV		20000
 static int qg_determine_pon_soc(struct qpnp_qg *chip)
 {
-	int rc = 0, batt_temp = 0;
+	int rc = 0, batt_temp = 0, i;
 	bool use_pon_ocv = true;
 	unsigned long rtc_sec = 0;
-	u32 ocv_uv = 0, ocv_raw = 0, soc = 0, shutdown[SDAM_MAX] = {0};
+	u32 ocv_uv = 0, soc = 0, shutdown[SDAM_MAX] = {0};
 	char ocv_type[20] = "NONE";
 
 	if (!chip->profile_loaded) {
@@ -1944,33 +2616,47 @@
 			goto done;
 		}
 
-		/*
-		 * Read S3_LAST_OCV, if S3_LAST_OCV is invalid,
-		 * read the SDAM_PON_OCV
-		 * if SDAM is not-set, use S7_PON_OCV.
-		 */
-		strlcpy(ocv_type, "S3_LAST_SOC", 20);
-		rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S3_LAST_OCV);
-		if (rc < 0)
-			goto done;
-
-		if (ocv_raw == FIFO_V_RESET_VAL) {
-			/* S3_LAST_OCV is invalid */
-			strlcpy(ocv_type, "SDAM_PON_SOC", 20);
-			rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, SDAM_PON_OCV);
+		/* read all OCVs */
+		for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) {
+			rc = qg_read_ocv(chip, &ocv[i].ocv_uv,
+						&ocv[i].ocv_raw, i);
 			if (rc < 0)
-				goto done;
+				pr_err("Failed to read %s OCV rc=%d\n",
+						ocv[i].ocv_type, rc);
+			else
+				qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n",
+					ocv[i].ocv_type, ocv[i].ocv_uv);
+		}
 
-			if (!ocv_uv) {
-				/* SDAM_PON_OCV is not set */
+		if (ocv[S3_LAST_OCV].ocv_raw == FIFO_V_RESET_VAL) {
+			if (!ocv[SDAM_PON_OCV].ocv_uv) {
 				strlcpy(ocv_type, "S7_PON_SOC", 20);
-				rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw,
-							S7_PON_OCV);
-				if (rc < 0)
-					goto done;
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
+			} else if (ocv[SDAM_PON_OCV].ocv_uv <=
+					ocv[S7_PON_OCV].ocv_uv) {
+				strlcpy(ocv_type, "S7_PON_SOC", 20);
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
+			} else if (!shutdown[SDAM_VALID] &&
+				((ocv[SDAM_PON_OCV].ocv_uv -
+					ocv[S7_PON_OCV].ocv_uv) >
+					S7_ERROR_MARGIN_UV)) {
+				strlcpy(ocv_type, "S7_PON_SOC", 20);
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
+			} else {
+				strlcpy(ocv_type, "SDAM_PON_SOC", 20);
+				ocv_uv = ocv[SDAM_PON_OCV].ocv_uv;
+			}
+		} else {
+			if (ocv[S3_LAST_OCV].ocv_uv >= ocv[S7_PON_OCV].ocv_uv) {
+				strlcpy(ocv_type, "S3_LAST_SOC", 20);
+				ocv_uv = ocv[S3_LAST_OCV].ocv_uv;
+			} else {
+				strlcpy(ocv_type, "S7_PON_SOC", 20);
+				ocv_uv = ocv[S7_PON_OCV].ocv_uv;
 			}
 		}
 
+		ocv_uv = CAP(QG_MIN_OCV_UV, QG_MAX_OCV_UV, ocv_uv);
 		rc = lookup_soc_ocv(&soc, ocv_uv, batt_temp, false);
 		if (rc < 0) {
 			pr_err("Failed to lookup SOC@PON rc=%d\n", rc);
@@ -1996,13 +2682,16 @@
 	if (rc < 0)
 		pr_err("Failed to update MSOC register rc=%d\n", rc);
 
-	rc = qg_update_sdam_params(chip);
+	rc = qg_store_soc_params(chip);
 	if (rc < 0)
 		pr_err("Failed to update sdam params rc=%d\n", rc);
 
 	pr_info("using %s @ PON ocv_uv=%duV soc=%d\n",
 			ocv_type, ocv_uv, chip->msoc);
 
+	/* SOC reporting is now ready */
+	chip->soc_reporting_ready = 1;
+
 	return 0;
 }
 
@@ -2025,6 +2714,7 @@
 	return 0;
 }
 
+#define ADC_CONV_DLY_512MS		0xA
 static int qg_hw_init(struct qpnp_qg *chip)
 {
 	int rc, temp;
@@ -2205,6 +2895,22 @@
 		return rc;
 	}
 
+	/* disable S5 */
+	rc = qg_masked_write(chip, chip->qg_base +
+				QG_S5_OCV_VALIDATE_MEAS_CTL1_REG,
+				ALLOW_S5_BIT, 0);
+	if (rc < 0)
+		pr_err("Failed to disable S5 rc=%d\n", rc);
+
+	/* change PON OCV time to 512ms */
+	rc = qg_masked_write(chip, chip->qg_base +
+				QG_S7_PON_OCV_MEAS_CTL1_REG,
+				ADC_CONV_DLY_MASK,
+				ADC_CONV_DLY_512MS);
+	if (rc < 0)
+		pr_err("Failed to reconfigure S7-delay rc=%d\n", rc);
+
 	return 0;
 }
 
@@ -2224,6 +2930,10 @@
 				QG_INIT_STATE_IRQ_DISABLE, true, 0);
 	}
 
+	/* restore ESR data */
+	if (!chip->dt.esr_disable)
+		qg_retrieve_esr_params(chip);
+
 	return 0;
 }
 
@@ -2298,10 +3008,12 @@
 	return 0;
 }
 
+#define QG_TTF_ITERM_DELTA_MA		1
 static int qg_alg_init(struct qpnp_qg *chip)
 {
 	struct cycle_counter *counter;
 	struct cap_learning *cl;
+	struct ttf *ttf;
 	struct device_node *node = chip->dev->of_node;
 	int rc;
 
@@ -2324,6 +3036,28 @@
 
 	chip->counter = counter;
 
+	ttf = devm_kzalloc(chip->dev, sizeof(*ttf), GFP_KERNEL);
+	if (!ttf)
+		return -ENOMEM;
+
+	ttf->get_ttf_param = qg_get_ttf_param;
+	ttf->awake_voter = qg_ttf_awake_voter;
+	ttf->iterm_delta = QG_TTF_ITERM_DELTA_MA;
+	ttf->data = chip;
+
+	rc = ttf_tte_init(ttf);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing ttf, rc:%d\n",
+			rc);
+		ttf->data = NULL;
+		counter->data = NULL;
+		devm_kfree(chip->dev, ttf);
+		devm_kfree(chip->dev, counter);
+		return rc;
+	}
+
+	chip->ttf = ttf;
+
 	chip->dt.cl_disable = of_property_read_bool(node,
 					"qcom,cl-disable");
 
@@ -2348,6 +3082,7 @@
 		counter->data = NULL;
 		cl->data = NULL;
 		devm_kfree(chip->dev, counter);
+		devm_kfree(chip->dev, ttf);
 		devm_kfree(chip->dev, cl);
 		return rc;
 	}
@@ -2373,10 +3108,13 @@
 #define DEFAULT_CL_MAX_START_SOC	15
 #define DEFAULT_CL_MIN_TEMP_DECIDEGC	150
 #define DEFAULT_CL_MAX_TEMP_DECIDEGC	500
-#define DEFAULT_CL_MAX_INC_DECIPERC	5
-#define DEFAULT_CL_MAX_DEC_DECIPERC	100
-#define DEFAULT_CL_MIN_LIM_DECIPERC	0
-#define DEFAULT_CL_MAX_LIM_DECIPERC	0
+#define DEFAULT_CL_MAX_INC_DECIPERC	10
+#define DEFAULT_CL_MAX_DEC_DECIPERC	20
+#define DEFAULT_CL_MIN_LIM_DECIPERC	500
+#define DEFAULT_CL_MAX_LIM_DECIPERC	100
+#define DEFAULT_ESR_QUAL_CURRENT_UA	130000
+#define DEFAULT_ESR_QUAL_VBAT_UV	7000
+#define DEFAULT_ESR_DISABLE_SOC		1000
 static int qg_parse_dt(struct qpnp_qg *chip)
 {
 	int rc = 0;
@@ -2570,6 +3308,33 @@
 	else
 		chip->dt.rbat_conn_mohm = temp;
 
+	/* esr */
+	chip->dt.esr_disable = of_property_read_bool(node,
+					"qcom,esr-disable");
+
+	chip->dt.esr_discharge_enable = of_property_read_bool(node,
+					"qcom,esr-discharge-enable");
+
+	rc = of_property_read_u32(node, "qcom,esr-qual-current-ua", &temp);
+	if (rc < 0)
+		chip->dt.esr_qual_i_ua = DEFAULT_ESR_QUAL_CURRENT_UA;
+	else
+		chip->dt.esr_qual_i_ua = temp;
+
+	rc = of_property_read_u32(node, "qcom,esr-qual-vbatt-uv", &temp);
+	if (rc < 0)
+		chip->dt.esr_qual_v_uv = DEFAULT_ESR_QUAL_VBAT_UV;
+	else
+		chip->dt.esr_qual_v_uv = temp;
+
+	rc = of_property_read_u32(node, "qcom,esr-disable-soc", &temp);
+	if (rc < 0)
+		chip->dt.esr_disable_soc = DEFAULT_ESR_DISABLE_SOC;
+	else
+		chip->dt.esr_disable_soc = temp * 100;
+
+	chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns");
+
 	/* Capacity learning params*/
 	if (!chip->dt.cl_disable) {
 		chip->dt.cl_feedback_on = of_property_read_bool(node,
@@ -2629,9 +3394,9 @@
 			chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc,
 			chip->cl->dt.min_temp, chip->cl->dt.max_temp);
 	}
-	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d\n",
+	qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
 			chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
-			chip->dt.delta_soc);
+			chip->dt.delta_soc, chip->dt.qg_ext_sense);
 
 	return 0;
 }
@@ -2646,6 +3411,7 @@
 	if (!chip->profile_loaded)
 		return 0;
 
+	cancel_delayed_work_sync(&chip->ttf->ttf_work);
 	/* disable GOOD_OCV IRQ in sleep */
 	vote(chip->good_ocv_irq_disable_votable,
 			QG_INIT_STATE_IRQ_DISABLE, true, 0);
@@ -2778,6 +3544,8 @@
 		chip->suspend_data = false;
 	}
 
+	schedule_delayed_work(&chip->ttf->ttf_work, 0);
+
 	return rc;
 }
 
@@ -2855,6 +3623,11 @@
 	chip->maint_soc = -EINVAL;
 	chip->batt_soc = INT_MIN;
 	chip->cc_soc = INT_MIN;
+	chip->full_soc = QG_SOC_FULL;
+	chip->chg_iterm_ma = INT_MIN;
+	chip->soh = -EINVAL;
+	chip->esr_actual = -EINVAL;
+	chip->esr_nominal = -EINVAL;
 
 	rc = qg_alg_init(chip);
 	if (rc < 0) {
@@ -2920,6 +3693,7 @@
 			pr_err("Error in restoring cycle_count, rc=%d\n", rc);
 			return rc;
 		}
+		schedule_delayed_work(&chip->ttf->ttf_work, 10000);
 	}
 
 	rc = qg_determine_pon_soc(chip);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 5df241f..56a09c6 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -331,6 +331,9 @@
 	if (rc < 0)
 		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
 
+	chg->disable_stat_sw_override = of_property_read_bool(node,
+					"qcom,disable-stat-sw-override");
+
 	return 0;
 }
 
@@ -342,6 +345,7 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_ONLINE,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
@@ -398,6 +402,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_prop_usb_voltage_max(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		rc = smblib_get_prop_usb_voltage_max_design(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		rc = smblib_get_prop_usb_voltage_now(chg, val);
 		break;
@@ -991,6 +998,8 @@
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
 };
 
 static int smb2_batt_get_prop(struct power_supply *psy,
@@ -1048,9 +1057,6 @@
 	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
 		val->intval = chg->sw_jeita_enabled;
 		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		rc = smblib_get_prop_batt_voltage_now(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		val->intval = get_client_vote(chg->fv_votable,
 				BATT_PROFILE_VOTER);
@@ -1062,9 +1068,6 @@
 		val->intval = get_client_vote_locked(chg->fv_votable,
 				QNOVO_VOTER);
 		break;
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		rc = smblib_get_prop_batt_current_now(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		val->intval = get_client_vote_locked(chg->fcc_votable,
 				QNOVO_VOTER);
@@ -1073,9 +1076,6 @@
 		val->intval = get_client_vote(chg->fcc_votable,
 					      BATT_PROFILE_VOTER);
 		break;
-	case POWER_SUPPLY_PROP_TEMP:
-		rc = smblib_get_prop_batt_temp(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_TECHNOLOGY:
 		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
 		break;
@@ -1103,7 +1103,12 @@
 		val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
-		rc = smblib_get_prop_batt_charge_counter(chg, val);
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = smblib_get_prop_from_bms(chg, psp, val);
 		break;
 	default:
 		pr_err("batt power supply prop %d not supported\n", psp);
@@ -1837,6 +1842,16 @@
 		}
 	}
 
+	if (chg->disable_stat_sw_override) {
+		rc = smblib_masked_write(chg, STAT_CFG_REG,
+				STAT_SW_OVERRIDE_CFG_BIT, 0);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't disable STAT SW override rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 4fd659e..9840396 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -61,7 +61,7 @@
 	},
 	.icl_stat		= {
 		.name   = "input current limit status",
-		.reg    = AICL_ICL_STATUS_REG,
+		.reg    = ICL_STATUS_REG,
 		.min_u  = 0,
 		.max_u  = 3000000,
 		.step_u = 50000,
@@ -206,6 +206,13 @@
 	USBIN_VOLTAGE,
 };
 
+enum {
+	BAT_THERM = 0,
+	MISC_THERM,
+	CONN_THERM,
+	SMB_THERM,
+};
+
 #define PMI632_MAX_ICL_UA	3000000
 static int smb5_chg_config_init(struct smb5 *chip)
 {
@@ -244,7 +251,7 @@
 		chg->use_extcon = true;
 		chg->name = "pmi632_charger";
 		/* PMI632 does not support PD */
-		__pd_disabled = 1;
+		chg->pd_not_supported = true;
 		chg->hw_max_icl_ua =
 			(chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
 						: PMI632_MAX_ICL_UA;
@@ -266,7 +273,55 @@
 	return rc;
 }
 
+#define PULL_NO_PULL	0
+#define PULL_30K	30
+#define PULL_100K	100
+#define PULL_400K	400
+static int get_valid_pullup(int pull_up)
+{
+	int pull;
+
+	/* pull up can only be 0/30K/100K/400K */
+	switch (pull_up) {
+	case PULL_NO_PULL:
+		pull = INTERNAL_PULL_NO_PULL;
+		break;
+	case PULL_30K:
+		pull = INTERNAL_PULL_30K_PULL;
+		break;
+	case PULL_100K:
+		pull = INTERNAL_PULL_100K_PULL;
+		break;
+	case PULL_400K:
+		pull = INTERNAL_PULL_400K_PULL;
+		break;
+	default:
+		pull = INTERNAL_PULL_100K_PULL;
+	}
+
+	return pull;
+}
+
+#define INTERNAL_PULL_UP_MASK	0x3
+static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
+					int pull)
+{
+	int rc;
+	int shift = type * 2;
+	u8 mask = INTERNAL_PULL_UP_MASK << shift;
+	u8 val = pull << shift;
+
+	rc = smblib_masked_write(chg, BATIF_ADC_INTERNAL_PULL_UP_REG,
+				mask, val);
+	if (rc < 0)
+		dev_err(chg->dev,
+			"Couldn't configure ADC pull-up reg rc=%d\n", rc);
+
+	return rc;
+}
+
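To make the shift/mask arithmetic in smb5_configure_internal_pull() concrete (the thermistor index comes from the BAT_THERM..SMB_THERM enum added above; the values below are an assumed example):

	/*
	 * type = CONN_THERM (2), pull = INTERNAL_PULL_100K_PULL
	 *   shift = type * 2     = 4
	 *   mask  = 0x3 << shift = 0x30
	 *   val   = pull << shift
	 * i.e. each thermistor owns a 2-bit field of
	 * BATIF_ADC_INTERNAL_PULL_UP_REG.
	 */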
 #define MICRO_1P5A		1500000
+#define MICRO_1PA		1000000
 #define MICRO_P1A		100000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 #define MIN_WD_BARK_TIME		16
@@ -317,7 +372,8 @@
 	rc = of_property_read_u32(node,
 				"qcom,otg-cl-ua", &chg->otg_cl_ua);
 	if (rc < 0)
-		chg->otg_cl_ua = MICRO_1P5A;
+		chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
+							MICRO_1PA : MICRO_1P5A;
 
 	if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
 		chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
@@ -385,6 +441,16 @@
 	if (rc < 0)
 		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
 
+	chg->hw_die_temp_mitigation = of_property_read_bool(node,
+					"qcom,hw-die-temp-mitigation");
+
+	chg->hw_connector_mitigation = of_property_read_bool(node,
+					"qcom,hw-connector-mitigation");
+
+	chg->connector_pull_up = -EINVAL;
+	of_property_read_u32(node, "qcom,connector-internal-pull-kohm",
+					&chg->connector_pull_up);
+
 	return 0;
 }
 
@@ -469,9 +535,11 @@
 	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
+	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 };
 
 static int smb5_usb_get_prop(struct power_supply *psy,
@@ -505,6 +573,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_prop_usb_voltage_max(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		rc = smblib_get_prop_usb_voltage_max_design(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
 		val->intval = get_client_vote(chg->usb_icl_votable, PD_VOTER);
 		break;
@@ -600,6 +671,13 @@
 	case POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED:
 		val->intval = !chg->flash_active;
 		break;
+	case POWER_SUPPLY_PROP_QC_OPTI_DISABLE:
+		if (chg->hw_die_temp_mitigation)
+			val->intval = POWER_SUPPLY_QC_THERMAL_BALANCE_DISABLE
+					| POWER_SUPPLY_QC_INOV_THERMAL_DISABLE;
+		if (chg->hw_connector_mitigation)
+			val->intval |= POWER_SUPPLY_QC_CTM_DISABLE;
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -1491,16 +1569,28 @@
 		return rc;
 	}
 
-	/* configure VCONN for software control */
-	rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
-				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
-				 VCONN_EN_SRC_BIT);
+	rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+				EN_TRY_SNK_BIT, EN_TRY_SNK_BIT);
 	if (rc < 0) {
 		dev_err(chg->dev,
-			"Couldn't configure VCONN for SW control rc=%d\n", rc);
+			"Couldn't enable try.snk rc=%d\n", rc);
 		return rc;
 	}
 
+	/* Keep VCONN in h/w controlled mode for PMI632 */
+	if (chg->smb_version != PMI632_SUBTYPE) {
+		/* configure VCONN for software control */
+		rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure VCONN for SW control rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
@@ -1520,6 +1610,42 @@
 	return rc;
 }
 
+static int smb5_configure_mitigation(struct smb_charger *chg)
+{
+	int rc;
+	u8 chan = 0;
+
+	if (!chg->hw_die_temp_mitigation && !chg->hw_connector_mitigation)
+		return 0;
+
+	if (chg->hw_die_temp_mitigation) {
+		rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
+				THERMREG_CONNECTOR_ADC_SRC_EN_BIT
+				| THERMREG_DIE_ADC_SRC_EN_BIT
+				| THERMREG_DIE_CMP_SRC_EN_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure THERM_SRC reg rc=%d\n", rc);
+			return rc;
+		}
+
+		chan = DIE_TEMP_CHANNEL_EN_BIT;
+	}
+
+	if (chg->hw_connector_mitigation)
+		chan |= CONN_THM_CHANNEL_EN_BIT;
+
+	rc = smblib_masked_write(chg, BATIF_ADC_CHANNEL_EN_REG,
+			CONN_THM_CHANNEL_EN_BIT | DIE_TEMP_CHANNEL_EN_BIT,
+			chan);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable ADC channel rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
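In short, smb5_configure_mitigation() programs the ADC channels purely from the two DT flags parsed earlier in this file (a summary of the function, not new behaviour):

	/*
	 * qcom,hw-die-temp-mitigation  -> THERMREG die/connector sources
	 *                                 + DIE_TEMP_CHANNEL_EN_BIT
	 * qcom,hw-connector-mitigation -> CONN_THM_CHANNEL_EN_BIT
	 * neither flag set             -> the function is a no-op
	 */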
 static int smb5_init_hw(struct smb5 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -1594,15 +1720,45 @@
 
 	/*
 	 * PMI632 based hw init:
+	 * - Enable STAT pin function on SMB_EN
 	 * - Rerun APSD to ensure proper charger detection if device
 	 *   boots with charger connected.
 	 * - Initialize flash module for PMI632
 	 */
 	if (chg->smb_version == PMI632_SUBTYPE) {
+		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+					EN_STAT_CMD_BIT, EN_STAT_CMD_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure SMB_EN rc=%d\n",
+					rc);
+			return rc;
+		}
+
 		schgm_flash_init(chg);
 		smblib_rerun_apsd_if_required(chg);
 	}
 
+	/* clear the ICL override if it is set */
+	rc = smblib_icl_override(chg, false);
+	if (rc < 0) {
+		pr_err("Couldn't disable ICL override rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set OTG current limit */
+	rc = smblib_set_charge_param(chg, &chg->param.otg_cl, chg->otg_cl_ua);
+	if (rc < 0) {
+		pr_err("Couldn't set otg current limit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure temperature mitigation */
+	rc = smb5_configure_mitigation(chg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure mitigation rc=%d\n", rc);
+		return rc;
+	}
+
 	/* vote 0mA on usb_icl for non battery platforms */
 	vote(chg->usb_icl_votable,
 		DEFAULT_VOTER, chip->dt.no_battery, 0);
@@ -1813,6 +1969,17 @@
 		return rc;
 	}
 
+	if (chg->connector_pull_up != -EINVAL) {
+		rc = smb5_configure_internal_pull(chg, CONN_THERM,
+				get_valid_pullup(chg->connector_pull_up));
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't configure CONN_THERM pull-up rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
@@ -2366,12 +2533,6 @@
 		return -EINVAL;
 	}
 
-	rc = smb5_parse_dt(chip);
-	if (rc < 0) {
-		pr_err("Couldn't parse device tree rc=%d\n", rc);
-		return rc;
-	}
-
 	rc = smb5_chg_config_init(chip);
 	if (rc < 0) {
 		if (rc != -EPROBE_DEFER)
@@ -2379,6 +2540,12 @@
 		return rc;
 	}
 
+	rc = smb5_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smblib_init(chg);
 	if (rc < 0) {
 		pr_err("Smblib_init failed rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c
index 3bb4810..40cfd9c 100644
--- a/drivers/power/supply/qcom/qpnp-smbcharger.c
+++ b/drivers/power/supply/qcom/qpnp-smbcharger.c
@@ -1520,6 +1520,47 @@
 	return chip->parallel.psy;
 }
 
+static int smbchg_request_dpdm(struct smbchg_chip *chip, bool enable)
+{
+	int rc = 0;
+
+	/* fetch the DPDM regulator */
+	if (!chip->dpdm_reg && of_get_property(chip->dev->of_node,
+				"dpdm-supply", NULL)) {
+		chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
+		if (IS_ERR(chip->dpdm_reg)) {
+			rc = PTR_ERR(chip->dpdm_reg);
+			dev_err(chip->dev, "Couldn't get dpdm regulator rc=%d\n",
+					rc);
+			chip->dpdm_reg = NULL;
+			return rc;
+		}
+	}
+
+	if (!chip->dpdm_reg)
+		return -ENODEV;
+
+	if (enable) {
+		if (!regulator_is_enabled(chip->dpdm_reg)) {
+			pr_smb(PR_STATUS, "enabling DPDM regulator\n");
+			rc = regulator_enable(chip->dpdm_reg);
+			if (rc < 0)
+				dev_err(chip->dev, "Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (regulator_is_enabled(chip->dpdm_reg)) {
+			pr_smb(PR_STATUS, "disabling DPDM regulator\n");
+			rc = regulator_disable(chip->dpdm_reg);
+			if (rc < 0)
+				dev_err(chip->dev, "Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+	}
+
+	return rc;
+}
+
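The helper above centralizes DPDM regulator handling (lazy devm_regulator_get(), enable/disable only on actual state changes); the call sites updated in the hunks that follow reduce to:

	rc = smbchg_request_dpdm(chip, true);	/* USB insertion / DP-DM pulsing */
	if (rc < 0)
		pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);

	smbchg_request_dpdm(chip, false);	/* USB removal */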
 static void smbchg_usb_update_online_work(struct work_struct *work)
 {
 	struct smbchg_chip *chip = container_of(work,
@@ -4590,8 +4631,11 @@
 	if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
 		pr_smb(PR_MISC, "overwriting state = %d with %d\n",
 				state, POWER_SUPPLY_DP_DM_DPF_DMF);
-		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-			return regulator_enable(chip->dpdm_reg);
+		rc = smbchg_request_dpdm(chip, true);
+		if (rc < 0) {
+			pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+			return rc;
+		}
 	}
 	pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state);
 	pval.intval = state;
@@ -4697,8 +4741,7 @@
 	smbchg_relax(chip, PM_DETECT_HVDCP);
 	smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN);
 	extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present);
-	if (chip->dpdm_reg)
-		regulator_disable(chip->dpdm_reg);
+	smbchg_request_dpdm(chip, false);
 	schedule_work(&chip->usb_set_online_work);
 
 	pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n");
@@ -5248,8 +5291,7 @@
 {
 	int rc = 0;
 
-	if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-		rc = regulator_enable(chip->dpdm_reg);
+	rc = smbchg_request_dpdm(chip, true);
 	if (rc < 0) {
 		pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
 		return rc;
@@ -6481,8 +6523,7 @@
 	 */
 	if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
 		pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
-		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-			rc = regulator_enable(chip->dpdm_reg);
+		rc = smbchg_request_dpdm(chip, true);
 		if (rc < 0) {
 			pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
 			return rc;
@@ -6731,15 +6772,8 @@
 	chip->dc_present = is_dc_present(chip);
 
 	if (chip->usb_present) {
-		int rc = 0;
-
 		pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
-		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
-			rc = regulator_enable(chip->dpdm_reg);
-		if (rc < 0) {
-			pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
-			return rc;
-		}
+		smbchg_request_dpdm(chip, true);
 		handle_usb_insertion(chip);
 	} else {
 		handle_usb_removal(chip);
@@ -8372,14 +8406,6 @@
 		goto votables_cleanup;
 	}
 
-	if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) {
-		chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
-		if (IS_ERR(chip->dpdm_reg)) {
-			rc = PTR_ERR(chip->dpdm_reg);
-			goto votables_cleanup;
-		}
-	}
-
 	rc = smbchg_hw_init(chip);
 	if (rc < 0) {
 		dev_err(&pdev->dev,
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 39005f6..612c3dd 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1908,7 +1908,8 @@
 		   stat);
 
 	if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
-		rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+		rc = smblib_get_prop_from_bms(chg,
+				POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
 		if (!rc) {
 			/*
 			 * If Vbatt is within 40mV above Vfloat, then don't
@@ -1973,45 +1974,6 @@
 	return 0;
 }
 
-int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
-				     union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chg->bms_psy)
-		return -EINVAL;
-
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
-	return rc;
-}
-
-int smblib_get_prop_batt_current_now(struct smb_charger *chg,
-				     union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chg->bms_psy)
-		return -EINVAL;
-
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_CURRENT_NOW, val);
-	return rc;
-}
-
-int smblib_get_prop_batt_temp(struct smb_charger *chg,
-			      union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chg->bms_psy)
-		return -EINVAL;
-
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_TEMP, val);
-	return rc;
-}
-
 int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
 					union power_supply_propval *val)
 {
@@ -2047,16 +2009,17 @@
 	return 0;
 }
 
-int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
-				     union power_supply_propval *val)
+int smblib_get_prop_from_bms(struct smb_charger *chg,
+				enum power_supply_property psp,
+				union power_supply_propval *val)
 {
 	int rc;
 
 	if (!chg->bms_psy)
 		return -EINVAL;
 
-	rc = power_supply_get_property(chg->bms_psy,
-				       POWER_SUPPLY_PROP_CHARGE_COUNTER, val);
+	rc = power_supply_get_property(chg->bms_psy, psp, val);
+
 	return rc;
 }
 
@@ -2452,6 +2415,28 @@
 	switch (chg->real_charger_type) {
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		if (chg->smb_version == PM660_SUBTYPE)
+			val->intval = MICRO_9V;
+		else
+			val->intval = MICRO_12V;
+		break;
+	case POWER_SUPPLY_TYPE_USB_PD:
+		val->intval = chg->voltage_max_uv;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	switch (chg->real_charger_type) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 	case POWER_SUPPLY_TYPE_USB_PD:
 		if (chg->smb_version == PM660_SUBTYPE)
 			val->intval = MICRO_9V;
@@ -3445,6 +3430,9 @@
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 	struct storm_watch *wdata;
+	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+	int rc;
+	u8 stat = 0, max_pulses = 0;
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
 	if (!chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data)
@@ -3452,6 +3440,46 @@
 
 	wdata = &chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data->storm_data;
 	reset_storm_count(wdata);
+
+	if (!chg->non_compliant_chg_detected &&
+			apsd->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+		rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't read CHANGE_STATUS_REG rc=%d\n", rc);
+
+		if (stat & QC_5V_BIT)
+			return IRQ_HANDLED;
+
+		rc = smblib_read(chg, HVDCP_PULSE_COUNT_MAX_REG, &max_pulses);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't read QC2 max pulses rc=%d\n", rc);
+
+		chg->non_compliant_chg_detected = true;
+		chg->qc2_max_pulses = (max_pulses &
+				HVDCP_PULSE_COUNT_MAX_QC2_MASK);
+
+		if (stat & QC_12V_BIT) {
+			rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+					HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+					HVDCP_PULSE_COUNT_MAX_QC2_9V);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't force max pulses to 9V rc=%d\n",
+						rc);
+
+		} else if (stat & QC_9V_BIT) {
+			rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+					HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+					HVDCP_PULSE_COUNT_MAX_QC2_5V);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't force max pulses to 5V rc=%d\n",
+						rc);
+
+		}
+		smblib_rerun_apsd(chg);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -4270,6 +4298,17 @@
 	if (rc < 0)
 		smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
 
+	/* if non-compliant charger caused UV, restore original max pulses */
+	if (chg->non_compliant_chg_detected) {
+		rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+				HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+				chg->qc2_max_pulses);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't restore max pulses rc=%d\n",
+					rc);
+		chg->non_compliant_chg_detected = false;
+	}
+
 	/* enable APSD CC trigger for next insertion */
 	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
 				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index dc8cbc7..3b8bc1f 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -353,6 +353,7 @@
 	bool			use_extcon;
 	bool			otg_present;
 	bool			is_audio_adapter;
+	bool			disable_stat_sw_override;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -361,6 +362,8 @@
 	bool			try_sink_active;
 	int			boost_current_ua;
 	int			temp_speed_reading_count;
+	int			qc2_max_pulses;
+	bool			non_compliant_chg_detected;
 	bool			fake_usb_insertion;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
@@ -444,14 +447,6 @@
 				union power_supply_propval *val);
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
-				union power_supply_propval *val);
-int smblib_get_prop_batt_current_now(struct smb_charger *chg,
-				union power_supply_propval *val);
-int smblib_get_prop_batt_temp(struct smb_charger *chg,
-				union power_supply_propval *val);
-int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
-				union power_supply_propval *val);
 int smblib_set_prop_input_suspend(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -480,6 +475,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_usb_current_now(struct smb_charger *chg,
@@ -541,6 +538,9 @@
 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
 int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_from_bms(struct smb_charger *chg,
+				enum power_supply_property psp,
+				union power_supply_propval *val);
 int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index d40d6fd..449d974 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -587,6 +587,15 @@
 #define EN_LEGACY_CABLE_DETECTION_BIT		BIT(1)
 #define ALLOW_PD_DRING_UFP_TCCDB_BIT		BIT(0)
 
+#define HVDCP_PULSE_COUNT_MAX_REG		(USBIN_BASE + 0x5B)
+#define HVDCP_PULSE_COUNT_MAX_QC2_MASK		GENMASK(7, 6)
+enum {
+	HVDCP_PULSE_COUNT_MAX_QC2_5V,
+	HVDCP_PULSE_COUNT_MAX_QC2_9V,
+	HVDCP_PULSE_COUNT_MAX_QC2_12V,
+	HVDCP_PULSE_COUNT_MAX_QC2_INVALID
+};
+
 #define USBIN_ADAPTER_ALLOW_CFG_REG		(USBIN_BASE + 0x60)
 #define USBIN_ADAPTER_ALLOW_MASK		GENMASK(3, 0)
 enum {
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 1d99ccb..2671d6b 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -80,8 +80,12 @@
 #define BATIF_CFG_SMISC_BATID_REG		(BATIF_BASE + 0x73)
 #define CFG_SMISC_RBIAS_EXT_CTRL_BIT		BIT(2)
 
-#define SMB2CHGS_BATIF_ENG_SMISC_DIETEMP	(BATIF_BASE + 0xC0)
+#define SMB2CHG_BATIF_ENG_SMISC_DIETEMP	(BATIF_BASE + 0xC0)
 #define TDIE_COMPARATOR_THRESHOLD		GENMASK(5, 0)
+#define DIE_LOW_RANGE_BASE_DEGC			34
+#define DIE_LOW_RANGE_DELTA			16
+#define DIE_LOW_RANGE_MAX_DEGC			97
+#define DIE_LOW_RANGE_SHIFT			4
 
 #define BATIF_ENG_SCMISC_SPARE1_REG		(BATIF_BASE + 0xC2)
 #define EXT_BIAS_PIN_BIT			BIT(2)
@@ -91,13 +95,10 @@
 #define VALLEY_COMPARATOR_EN_BIT		BIT(0)
 
 #define TEMP_COMP_STATUS_REG			(MISC_BASE + 0x07)
-#define SKIN_TEMP_RST_HOT_BIT			BIT(6)
-#define SKIN_TEMP_UB_HOT_BIT			BIT(5)
-#define SKIN_TEMP_LB_HOT_BIT			BIT(4)
-#define DIE_TEMP_TSD_HOT_BIT			BIT(3)
-#define DIE_TEMP_RST_HOT_BIT			BIT(2)
-#define DIE_TEMP_UB_HOT_BIT			BIT(1)
-#define DIE_TEMP_LB_HOT_BIT			BIT(0)
+#define TEMP_RST_HOT_BIT			BIT(2)
+#define TEMP_UB_HOT_BIT				BIT(1)
+#define TEMP_LB_HOT_BIT				BIT(0)
+#define SKIN_TEMP_SHIFT				4
 
 #define MISC_RT_STS_REG				(MISC_BASE + 0x10)
 #define HARD_ILIMIT_RT_STS_BIT			BIT(5)
@@ -223,6 +224,8 @@
 	bool	disable_ctm;
 	int	pl_mode;
 	int	pl_batfet_mode;
+	bool	hw_die_temp_mitigation;
+	u32	die_temp_threshold;
 };
 
 struct smb1355 {
@@ -250,6 +253,11 @@
 	struct votable		*irq_disable_votable;
 };
 
+enum {
+	CONNECTOR_TEMP = 0,
+	DIE_TEMP,
+};
+
 static bool is_secure(struct smb1355 *chip, int addr)
 {
 	if (addr == CLOCK_REQUEST_REG)
@@ -379,8 +387,7 @@
 	u8 temp_stat;
 
 	for (i = 0; i < BIT(5); i++) {
-		rc = smb1355_masked_write(chip,
-				SMB2CHGS_BATIF_ENG_SMISC_DIETEMP,
+		rc = smb1355_masked_write(chip, SMB2CHG_BATIF_ENG_SMISC_DIETEMP,
 				TDIE_COMPARATOR_THRESHOLD, i);
 		if (rc < 0) {
 			pr_err("Couldn't set temp comp threshold rc=%d\n", rc);
@@ -399,7 +406,7 @@
 			continue;
 		}
 
-		if (!(temp_stat & DIE_TEMP_UB_HOT_BIT)) {
+		if (!(temp_stat & TEMP_UB_HOT_BIT)) {
 			/* found the temp */
 			break;
 		}
@@ -465,6 +472,7 @@
 	return 0;
 }
 
+#define DEFAULT_DIE_TEMP_LOW_THRESHOLD		90
 static int smb1355_parse_dt(struct smb1355 *chip)
 {
 	struct device_node *node = chip->dev->of_node;
@@ -495,6 +503,15 @@
 	if (of_property_read_bool(node, "qcom,stacked-batfet"))
 		chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
 
+	chip->dt.hw_die_temp_mitigation = of_property_read_bool(node,
+					"qcom,hw-die-temp-mitigation");
+
+	chip->dt.die_temp_threshold = DEFAULT_DIE_TEMP_LOW_THRESHOLD;
+	of_property_read_u32(node, "qcom,die-temp-threshold-degc",
+				&chip->dt.die_temp_threshold);
+	if (chip->dt.die_temp_threshold > DIE_LOW_RANGE_MAX_DEGC)
+		chip->dt.die_temp_threshold = DIE_LOW_RANGE_MAX_DEGC;
+
 	return 0;
 }
 
@@ -520,6 +537,7 @@
 	POWER_SUPPLY_PROP_MIN_ICL,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
 };
 
 static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
@@ -546,10 +564,13 @@
 	return rc;
 }
 
-static int smb1355_get_prop_connector_health(struct smb1355 *chip)
+static int smb1355_get_prop_health(struct smb1355 *chip, int type)
 {
 	u8 temp;
-	int rc;
+	int rc, shift;
+
+	/* Connector-temp uses skin-temp configuration */
+	shift = (type == CONNECTOR_TEMP) ? SKIN_TEMP_SHIFT : 0;
 
 	rc = smb1355_read(chip, TEMP_COMP_STATUS_REG, &temp);
 	if (rc < 0) {
@@ -557,13 +578,13 @@
 		return POWER_SUPPLY_HEALTH_UNKNOWN;
 	}
 
-	if (temp & SKIN_TEMP_RST_HOT_BIT)
+	if (temp & (TEMP_RST_HOT_BIT << shift))
 		return POWER_SUPPLY_HEALTH_OVERHEAT;
 
-	if (temp & SKIN_TEMP_UB_HOT_BIT)
+	if (temp & (TEMP_UB_HOT_BIT << shift))
 		return POWER_SUPPLY_HEALTH_HOT;
 
-	if (temp & SKIN_TEMP_LB_HOT_BIT)
+	if (temp & (TEMP_LB_HOT_BIT << shift))
 		return POWER_SUPPLY_HEALTH_WARM;
 
 	return POWER_SUPPLY_HEALTH_COOL;
@@ -614,7 +635,17 @@
 		val->intval = chip->die_temp_deciDegC;
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
-		rc = smb1355_get_prop_charger_temp_max(chip, val);
+		/*
+		 * In case of h/w controlled die_temp mitigation,
+		 * die_temp/die_temp_max cannot be reported, as this
+		 * requires run-time manipulation of the DIE_TEMP low
+		 * threshold, which would interfere with the h/w
+		 * mitigation scheme.
+		 */
+		if (chip->dt.hw_die_temp_mitigation)
+			val->intval = -EINVAL;
+		else
+			rc = smb1355_get_prop_charger_temp_max(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		val->intval = chip->disabled;
@@ -635,10 +666,14 @@
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 		if (chip->c_health == -EINVAL)
-			val->intval = smb1355_get_prop_connector_health(chip);
+			val->intval = smb1355_get_prop_health(chip,
+						CONNECTOR_TEMP);
 		else
 			val->intval = chip->c_health;
 		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		val->intval = smb1355_get_prop_health(chip, DIE_TEMP);
+		break;
 	case POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE:
 		val->intval = chip->dt.pl_batfet_mode;
 		break;
@@ -708,13 +743,16 @@
 	}
 
 	chip->die_temp_deciDegC = -EINVAL;
-	if (disable) {
-		chip->exit_die_temp = true;
-		cancel_delayed_work_sync(&chip->die_temp_work);
-	} else {
-		/* start the work to measure temperature */
-		chip->exit_die_temp = false;
-		schedule_delayed_work(&chip->die_temp_work, 0);
+	/* Only enable temperature measurement for s/w based mitigation */
+	if (!chip->dt.hw_die_temp_mitigation) {
+		if (disable) {
+			chip->exit_die_temp = true;
+			cancel_delayed_work_sync(&chip->die_temp_work);
+		} else {
+			/* start the work to measure temperature */
+			chip->exit_die_temp = false;
+			schedule_delayed_work(&chip->die_temp_work, 0);
+		}
 	}
 
 	if (chip->irq_disable_votable)
@@ -978,6 +1016,7 @@
 static int smb1355_init_hw(struct smb1355 *chip)
 {
 	int rc;
+	u8 val, range;
 
 	/* request clock always on */
 	rc = smb1355_clk_request(chip, true);
@@ -1045,13 +1084,35 @@
 		return rc;
 	}
 
+	/* Configure DIE temp Low threshold */
+	if (chip->dt.hw_die_temp_mitigation) {
+		range = (chip->dt.die_temp_threshold - DIE_LOW_RANGE_BASE_DEGC)
+						/ (DIE_LOW_RANGE_DELTA);
+		val = (chip->dt.die_temp_threshold
+				- ((range * DIE_LOW_RANGE_DELTA)
+						+ DIE_LOW_RANGE_BASE_DEGC))
+				% DIE_LOW_RANGE_DELTA;
+
+		rc = smb1355_masked_write(chip, SMB2CHG_BATIF_ENG_SMISC_DIETEMP,
+				TDIE_COMPARATOR_THRESHOLD,
+				(range << DIE_LOW_RANGE_SHIFT) | val);
+		if (rc < 0) {
+			pr_err("Couldn't set temp comp threshold rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	/*
-	 * Enable thermal Die temperature comparator source and disable hw
-	 * mitigation for skin/die
+	 * Enable thermal Die temperature comparator source and
+	 * enable hardware controlled current adjustment for die temp
+	 * if charger is configured in h/w controlled die temp mitigation.
 	 */
+	val = THERMREG_DIE_CMP_SRC_EN_BIT;
+	if (!chip->dt.hw_die_temp_mitigation)
+		val |= BYP_THERM_CHG_CURR_ADJUST_BIT;
 	rc = smb1355_masked_write(chip, MISC_THERMREG_SRC_CFG_REG,
 		THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT,
-		THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT);
+		val);
 	if (rc < 0) {
 		pr_err("Couldn't set Skin temperature comparator src rc=%d\n",
 			rc);
@@ -1062,8 +1123,9 @@
 	 * Disable hysteresis for die temperature. This is so that sw can run
 	 * stepping scheme quickly
 	 */
+	val = chip->dt.hw_die_temp_mitigation ? DIE_TEMP_COMP_HYST_BIT : 0;
 	rc = smb1355_masked_write(chip, BATIF_ENG_SCMISC_SPARE1_REG,
-				DIE_TEMP_COMP_HYST_BIT, 0);
+				DIE_TEMP_COMP_HYST_BIT, val);
 	if (rc < 0) {
 		pr_err("Couldn't disable hyst. for die rc=%d\n", rc);
 		return rc;
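
For reference, the DIE temp low-threshold write added to smb1355_init_hw() above encodes a degC value as a range index (16 degC per range above a 34 degC base) in the upper bits of the threshold field plus a degC offset within that range in the lower bits. A minimal user-space sketch of that encoding, assuming the same constants as the driver (illustrative only, not part of this patch):

#include <stdio.h>

#define DIE_LOW_RANGE_BASE_DEGC	34
#define DIE_LOW_RANGE_DELTA	16
#define DIE_LOW_RANGE_SHIFT	4

/* Mirror of the range/offset split done before writing
 * SMB2CHG_BATIF_ENG_SMISC_DIETEMP above. */
static unsigned char die_temp_threshold_to_reg(unsigned int degc)
{
	unsigned int range, offset;

	range = (degc - DIE_LOW_RANGE_BASE_DEGC) / DIE_LOW_RANGE_DELTA;
	offset = (degc - (range * DIE_LOW_RANGE_DELTA + DIE_LOW_RANGE_BASE_DEGC))
			% DIE_LOW_RANGE_DELTA;

	return (range << DIE_LOW_RANGE_SHIFT) | offset;
}

int main(void)
{
	/* default 90 degC threshold -> range 3, offset 8 -> 0x38 */
	printf("0x%02x\n", die_temp_threshold_to_reg(90));
	return 0;
}
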
diff --git a/drivers/power/supply/qcom/smb1360-charger-fg.c b/drivers/power/supply/qcom/smb1360-charger-fg.c
index ed9c610..4e98ec7 100644
--- a/drivers/power/supply/qcom/smb1360-charger-fg.c
+++ b/drivers/power/supply/qcom/smb1360-charger-fg.c
@@ -3424,8 +3424,8 @@
 	chip->otg_vreg.rdesc.owner = THIS_MODULE;
 	chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
 	chip->otg_vreg.rdesc.ops = &smb1360_otg_reg_ops;
-	chip->otg_vreg.rdesc.of_match = chip->dev->of_node->name;
-	chip->otg_vreg.rdesc.name = chip->dev->of_node->name;
+	chip->otg_vreg.rdesc.of_match = "qcom,smb1360-vbus";
+	chip->otg_vreg.rdesc.name = "qcom,smb1360-vbus";
 
 	cfg.dev = chip->dev;
 	cfg.driver_data = chip;
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index f1df8f0..86ecda5 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -1371,10 +1371,12 @@
 	[USBIN_PLUGIN_IRQ] = {
 		.name		= "usbin-plugin",
 		.handler	= smblib_handle_usb_plugin,
+		.wake		= true,
 	},
 	[USBIN_SRC_CHANGE_IRQ] = {
 		.name		= "usbin-src-change",
 		.handler	= smblib_handle_usb_source_change,
+		.wake		= true,
 	},
 	[USBIN_ICL_CHANGE_IRQ] = {
 		.name		= "usbin-icl-change",
@@ -1383,6 +1385,7 @@
 	[TYPE_C_CHANGE_IRQ] = {
 		.name		= "type-c-change",
 		.handler	= smblib_handle_usb_typec_change,
+		.wake		= true,
 	},
 /* DC INPUT IRQs */
 	[DCIN_COLLAPSE_IRQ] = {
@@ -1825,6 +1828,8 @@
 		goto cleanup;
 	}
 
+	device_init_wakeup(chip->chg.dev, true);
+
 	pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
 	return rc;
 
diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c
index 55a1f45..a92c975 100644
--- a/drivers/power/supply/qcom/smb1390-charger.c
+++ b/drivers/power/supply/qcom/smb1390-charger.c
@@ -82,6 +82,7 @@
 #define FCC_VOTER	"FCC_VOTER"
 #define ICL_VOTER	"ICL_VOTER"
 #define USB_VOTER	"USB_VOTER"
+#define SWITCHER_TOGGLE_VOTER	"SWITCHER_TOGGLE_VOTER"
 
 enum {
 	SWITCHER_OFF_WINDOW_IRQ = 0,
@@ -100,6 +101,7 @@
 	struct regmap		*regmap;
 	struct notifier_block	nb;
 	struct class		cp_class;
+	struct wakeup_source	*cp_ws;
 
 	/* work structs */
 	struct work_struct	status_change_work;
@@ -114,6 +116,7 @@
 	struct votable		*pl_disable_votable;
 	struct votable		*fcc_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*cp_awake_votable;
 
 	/* power supplies */
 	struct power_supply	*usb_psy;
@@ -124,6 +127,7 @@
 	bool			status_change_running;
 	bool			taper_work_running;
 	int			adc_channel;
+	int			irq_status;
 };
 
 struct smb_irq {
@@ -204,6 +208,18 @@
 	return true;
 }
 
+static void cp_toggle_switcher(struct smb1390 *chip)
+{
+	vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, true, 0);
+
+	/* Delay for toggling switcher */
+	usleep_range(20, 30);
+
+	vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, false, 0);
+}
+
 static irqreturn_t default_irq_handler(int irq, void *data)
 {
 	struct smb1390 *chip = data;
@@ -212,40 +228,13 @@
 	for (i = 0; i < NUM_IRQS; ++i) {
-		if (irq == chip->irqs[i])
+		if (irq == chip->irqs[i]) {
 			pr_debug("%s IRQ triggered\n", smb_irqs[i].name);
+			chip->irq_status |= 1 << i;
+		}
 	}
 
 	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t irev_irq_handler(int irq, void *data)
-{
-	struct smb1390 *chip = data;
-	int rc;
-
-	pr_debug("IREV IRQ triggered\n");
-
-	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
-			CMD_EN_SWITCHER_BIT, 0);
-	if (rc < 0) {
-		pr_err("Couldn't disable switcher by command mode, rc=%d\n",
-			rc);
-		goto out;
-	}
-
-	rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
-			CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
-	if (rc < 0) {
-		pr_err("Couldn't enable switcher by command mode, rc=%d\n",
-			rc);
-		goto out;
-	}
-
-out:
-	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
-	return IRQ_HANDLED;
-}
-
 static const struct smb_irq smb_irqs[] = {
 	[SWITCHER_OFF_WINDOW_IRQ] = {
 		.name		= "switcher-off-window",
@@ -264,7 +253,7 @@
 	},
 	[IREV_IRQ] = {
 		.name		= "irev-fault",
-		.handler	= irev_irq_handler,
+		.handler	= default_irq_handler,
 		.wake		= true,
 	},
 	[VPH_OV_HARD_IRQ] = {
@@ -338,6 +327,38 @@
 	return count;
 }
 
+static ssize_t cp_irq_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	struct smb1390 *chip = container_of(c, struct smb1390, cp_class);
+	int rc, val;
+
+	rc = smb1390_read(chip, CORE_INT_RT_STS_REG, &val);
+	if (rc < 0)
+		return -EINVAL;
+
+	val |= chip->irq_status;
+	chip->irq_status = 0;
+
+	return snprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t toggle_switcher_store(struct class *c,
+			struct class_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct smb1390 *chip = container_of(c, struct smb1390, cp_class);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val)
+		cp_toggle_switcher(chip);
+
+	return count;
+}
+
 static ssize_t die_temp_show(struct class *c, struct class_attribute *attr,
 			     char *buf)
 {
@@ -358,6 +379,8 @@
 	__ATTR_RO(stat1),
 	__ATTR_RO(stat2),
 	__ATTR_RW(enable),
+	__ATTR_RO(cp_irq),
+	__ATTR_WO(toggle_switcher),
 	__ATTR_RO(die_temp),
 	__ATTR_NULL,
 };
@@ -378,11 +401,12 @@
 		if (rc < 0)
 			return rc;
 
-		vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, false, 0);
 		vote(chip->pl_disable_votable, CP_VOTER, false, 0);
+		vote(chip->cp_awake_votable, CP_VOTER, false, 0);
 	} else {
 		vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, true, 0);
 		vote(chip->pl_disable_votable, CP_VOTER, true, 0);
+		vote(chip->cp_awake_votable, CP_VOTER, true, 0);
 		rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
 				   CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
 		if (rc < 0)
@@ -428,6 +452,20 @@
 	return rc;
 }
 
+static int smb1390_awake_vote_cb(struct votable *votable, void *data,
+				int awake, const char *client)
+{
+	struct smb1390 *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->cp_ws);
+	else
+		__pm_relax(chip->cp_ws);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
 static int smb1390_notifier_cb(struct notifier_block *nb,
 			       unsigned long event, void *data)
 {
@@ -589,6 +627,11 @@
 	if (IS_ERR(chip->ilim_votable))
 		return PTR_ERR(chip->ilim_votable);
 
+	chip->cp_awake_votable = create_votable("CP_AWAKE", VOTE_SET_ANY,
+			smb1390_awake_vote_cb, chip);
+	if (IS_ERR(chip->cp_awake_votable))
+		return PTR_ERR(chip->cp_awake_votable);
+
 	return 0;
 }
 
@@ -722,16 +765,21 @@
 	rc = smb1390_parse_dt(chip);
 	if (rc < 0) {
 		pr_err("Couldn't parse device tree rc=%d\n", rc);
-		goto out_work;
+		return rc;
 	}
 
 	chip->vadc_dev = qpnp_get_vadc(chip->dev, "smb");
 	if (IS_ERR(chip->vadc_dev)) {
 		rc = PTR_ERR(chip->vadc_dev);
-		pr_err("Couldn't get vadc dev rc=%d\n", rc);
-		goto out_work;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't get vadc dev rc=%d\n", rc);
+		return rc;
 	}
 
+	chip->cp_ws = wakeup_source_register("qcom-chargepump");
+	if (!chip->cp_ws)
+		return -ENOMEM;
+
 	rc = smb1390_create_votables(chip);
 	if (rc < 0) {
 		pr_err("Couldn't create votables rc=%d\n", rc);
@@ -778,6 +826,7 @@
 out_work:
 	cancel_work(&chip->taper_work);
 	cancel_work(&chip->status_change_work);
+	wakeup_source_unregister(chip->cp_ws);
 	return rc;
 }
 
@@ -790,8 +839,10 @@
 
 	/* explicitly disable charging */
 	vote(chip->disable_votable, USER_VOTER, true, 0);
+	vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, false, 0);
 	cancel_work(&chip->taper_work);
 	cancel_work(&chip->status_change_work);
+	wakeup_source_unregister(chip->cp_ws);
 	smb1390_destroy_votables(chip);
 	return 0;
 }
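
The cp_irq sysfs node added above relies on a latch-and-clear scheme: the IRQ handler ORs one bit per fired interrupt into chip->irq_status, and cp_irq_show() combines that latch with the live CORE_INT_RT_STS_REG value before clearing it, so interrupts that have already deasserted are still reported once to userspace. A stand-alone model of that behaviour (a sketch, not driver code):

#include <stdio.h>

static unsigned int irq_status;		/* software latch, like chip->irq_status */
static unsigned int hw_rt_status;	/* stands in for CORE_INT_RT_STS_REG */

static void irq_fired(unsigned int idx)
{
	irq_status |= 1u << idx;
}

static unsigned int cp_irq_read(void)
{
	unsigned int val = hw_rt_status | irq_status;

	irq_status = 0;			/* one-shot: cleared on read */
	return val;
}

int main(void)
{
	irq_fired(2);				/* e.g. a fault that came and went */
	hw_rt_status = 1u << 0;			/* one condition still asserted */
	printf("0x%x\n", cp_irq_read());	/* 0x5: latched + live bits */
	printf("0x%x\n", cp_irq_read());	/* 0x1: latch already cleared */
	return 0;
}
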
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index a5af817..c70d51e8 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -116,6 +116,19 @@
 	return 0;
 }
 
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+				ICL_OVERRIDE_AFTER_APSD_BIT,
+				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+
+	return rc;
+}
+
 int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override)
 {
 	int rc = 0;
@@ -459,6 +472,23 @@
 {
 	int rc = 0;
 
+	/* PMI632 supports a maximum of 9V */
+	if (chg->smb_version == PMI632_SUBTYPE) {
+		switch (allowed_voltage) {
+		case USBIN_ADAPTER_ALLOW_12V:
+		case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+		case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_OR_9V;
+			break;
+		case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+			break;
+		}
+	}
+
 	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
@@ -517,7 +547,7 @@
 	int rc;
 	u8 mask = HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT;
 
-	if (chg->pd_disabled)
+	if (chg->pd_not_supported)
 		return 0;
 
 	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, mask,
@@ -871,7 +901,7 @@
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 {
 	int rc = 0;
-	bool hc_mode = false;
+	bool hc_mode = false, override = false;
 
 	/* suspend and return if 25mA or less is requested */
 	if (icl_ua <= USBIN_25MA)
@@ -881,9 +911,10 @@
 		goto set_mode;
 
 	/* configure current */
-	if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
-		|| (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
-		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB
+		&& (chg->typec_legacy
+		|| chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+		|| chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)) {
 		rc = set_sdp_current(chg, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
@@ -897,6 +928,13 @@
 			goto out;
 		}
 		hc_mode = true;
+
+		/*
+		 * Micro-USB mode follows the ICL register independent of the
+		 * override bit; configure the override only for Type-C mode.
+		 */
+		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
+			override = true;
 	}
 
 set_mode:
@@ -907,6 +945,12 @@
 		goto out;
 	}
 
+	rc = smblib_icl_override(chg, override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+		goto out;
+	}
+
 	/* unsuspend after configuring current and override */
 	rc = smblib_set_usb_suspend(chg, false);
 	if (rc < 0) {
@@ -1850,6 +1894,28 @@
 	switch (chg->real_charger_type) {
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		if (chg->smb_version == PMI632_SUBTYPE)
+			val->intval = MICRO_9V;
+		else
+			val->intval = MICRO_12V;
+		break;
+	case POWER_SUPPLY_TYPE_USB_PD:
+		val->intval = chg->voltage_max_uv;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	switch (chg->real_charger_type) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 	case POWER_SUPPLY_TYPE_USB_PD:
 		if (chg->smb_version == PMI632_SUBTYPE)
 			val->intval = MICRO_9V;
@@ -2065,7 +2131,7 @@
 	int rc;
 	u8 stat;
 
-	rc = smblib_read(chg, TEMP_RANGE_STATUS_REG, &stat);
+	rc = smblib_read(chg, MISC_TEMP_RANGE_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TEMP_RANGE_STATUS_REG rc=%d\n",
 									rc);
@@ -2647,6 +2713,24 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	if (chg->mode == PARALLEL_MASTER) {
+		/*
+		 * Ignore if change in ICL is due to DIE temp mitigation.
+		 * This is to prevent any further ICL split.
+		 */
+		if (chg->hw_die_temp_mitigation) {
+			rc = smblib_read(chg, MISC_DIE_TEMP_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't read DIE_TEMP rc=%d\n", rc);
+				return IRQ_HANDLED;
+			}
+			if (stat & (DIE_TEMP_UB_BIT | DIE_TEMP_LB_BIT)) {
+				smblib_dbg(chg, PR_PARALLEL,
+					"skip ICL change DIE_TEMP %x\n", stat);
+				return IRQ_HANDLED;
+			}
+		}
+
 		rc = smblib_read(chg, AICL_STATUS_REG, &stat);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
@@ -2763,6 +2847,8 @@
 		rc = smblib_request_dpdm(chg, false);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+
+		smblib_update_usb_type(chg);
 	}
 
 	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
@@ -3078,7 +3164,8 @@
 	}
 
 	if (!chg->pr_swap_in_progress)
-		chg->ok_to_pd = !(*chg->pd_disabled) || chg->early_usb_attach;
+		chg->ok_to_pd = (!(*chg->pd_disabled) || chg->early_usb_attach)
+					&& !chg->pd_not_supported;
 }
 
 static void typec_src_insertion(struct smb_charger *chg)
@@ -3097,8 +3184,8 @@
 	}
 
 	chg->typec_legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
-	chg->ok_to_pd = !(chg->typec_legacy || *chg->pd_disabled)
-						|| chg->early_usb_attach;
+	chg->ok_to_pd = (!(chg->typec_legacy || *chg->pd_disabled)
+			|| chg->early_usb_attach) && !chg->pd_not_supported;
 	if (!chg->ok_to_pd) {
 		rc = smblib_configure_hvdcp_apsd(chg, true);
 		if (rc < 0) {
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 14eba8c..35e5dd3 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -272,6 +272,7 @@
 	int			otg_delay_ms;
 	int			*weak_chg_icl_ua;
 	struct qpnp_vadc_chip	*vadc_dev;
+	bool			pd_not_supported;
 
 	/* locks */
 	struct mutex		lock;
@@ -359,6 +360,9 @@
 	int			auto_recharge_soc;
 	bool			jeita_configured;
 	enum sink_src_mode	sink_src_mode;
+	bool			hw_die_temp_mitigation;
+	bool			hw_connector_mitigation;
+	int			connector_pull_up;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -491,6 +495,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_max_design(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -539,6 +545,7 @@
 int smblib_configure_wdog(struct smb_charger *chg, bool enable);
 int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
 int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable);
+int smblib_icl_override(struct smb_charger *chg, bool override);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index 79a8bd0..a925658 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -106,7 +106,7 @@
  *  DCDC Peripheral Registers  *
  ********************************/
 #define ICL_MAX_STATUS_REG			(DCDC_BASE + 0x06)
-
+#define ICL_STATUS_REG				(DCDC_BASE + 0x07)
 #define AICL_ICL_STATUS_REG			(DCDC_BASE + 0x08)
 
 #define AICL_STATUS_REG				(DCDC_BASE + 0x0A)
@@ -146,6 +146,18 @@
 #define SHIP_MODE_REG				(BATIF_BASE + 0x40)
 #define SHIP_MODE_EN_BIT			BIT(0)
 
+#define BATIF_ADC_CHANNEL_EN_REG		(BATIF_BASE + 0x82)
+#define CONN_THM_CHANNEL_EN_BIT			BIT(4)
+#define DIE_TEMP_CHANNEL_EN_BIT			BIT(2)
+
+#define BATIF_ADC_INTERNAL_PULL_UP_REG		(BATIF_BASE + 0x86)
+#define INTERNAL_PULL_UP_CONN_THM_MASK		GENMASK(5, 4)
+#define CONN_THM_SHIFT				4
+#define INTERNAL_PULL_NO_PULL			0x00
+#define INTERNAL_PULL_30K_PULL			0x01
+#define INTERNAL_PULL_100K_PULL			0x02
+#define INTERNAL_PULL_400K_PULL			0x03
+
 /********************************
  *  USBIN Peripheral Registers  *
  ********************************/
@@ -284,6 +296,7 @@
 
 #define TYPE_C_MODE_CFG_REG			(TYPEC_BASE + 0x44)
 #define TYPEC_POWER_ROLE_CMD_MASK		GENMASK(2, 1)
+#define EN_TRY_SNK_BIT				BIT(4)
 #define EN_SRC_ONLY_BIT				BIT(2)
 #define EN_SNK_ONLY_BIT				BIT(1)
 #define TYPEC_DISABLE_CMD_BIT			BIT(0)
@@ -332,7 +345,7 @@
 /********************************
  *  MISC Peripheral Registers  *
  ********************************/
-#define TEMP_RANGE_STATUS_REG			(MISC_BASE + 0x06)
+#define MISC_TEMP_RANGE_STATUS_REG		(MISC_BASE + 0x06)
 #define THERM_REG_ACTIVE_BIT			BIT(6)
 #define TLIM_BIT				BIT(5)
 #define TEMP_RANGE_MASK				GENMASK(4, 1)
@@ -342,6 +355,12 @@
 #define TEMP_BELOW_RANGE_BIT			BIT(1)
 #define THERMREG_DISABLED_BIT			BIT(0)
 
+#define MISC_DIE_TEMP_STATUS_REG		(MISC_BASE + 0x07)
+#define DIE_TEMP_SHDN_BIT			BIT(3)
+#define DIE_TEMP_RST_BIT			BIT(2)
+#define DIE_TEMP_UB_BIT				BIT(1)
+#define DIE_TEMP_LB_BIT				BIT(0)
+
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
@@ -365,6 +384,16 @@
 #define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
 #define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
 
+#define MISC_THERMREG_SRC_CFG_REG		(MISC_BASE + 0x70)
+#define THERMREG_SW_ICL_ADJUST_BIT		BIT(7)
+#define DIE_ADC_SEL_BIT				BIT(6)
+#define THERMREG_SMB_ADC_SRC_EN_BIT		BIT(5)
+#define THERMREG_CONNECTOR_ADC_SRC_EN_BIT	BIT(4)
+#define SKIN_ADC_CFG_BIT			BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT		BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT		BIT(1)
+#define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
+
 #define MISC_SMB_CFG_REG			(MISC_BASE + 0x90)
 #define SMB_EN_SEL_BIT				BIT(4)
 #define CP_EN_POLARITY_CFG_BIT			BIT(3)
diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c
index 31f5204..d24bef1 100644
--- a/drivers/pwm/pwm-qti-lpg.c
+++ b/drivers/pwm/pwm-qti-lpg.c
@@ -19,10 +19,12 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
+#include <linux/qpnp/qpnp-pbs.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -105,6 +107,34 @@
 #define LPG_LUT_VALUE_MSB_MASK		BIT(0)
 #define LPG_LUT_COUNT_MAX		47
 
+/* LPG config settings in SDAM */
+#define SDAM_REG_PBS_SEQ_EN			0x42
+#define PBS_SW_TRG_BIT				BIT(0)
+
+#define SDAM_REG_RAMP_STEP_DURATION		0x47
+
+#define SDAM_LUT_EN_OFFSET			0x0
+#define SDAM_PATTERN_CONFIG_OFFSET		0x1
+#define SDAM_END_INDEX_OFFSET			0x3
+#define SDAM_START_INDEX_OFFSET			0x4
+#define SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET	0x6
+
+/* SDAM_REG_LUT_EN */
+#define SDAM_LUT_EN_BIT				BIT(0)
+
+/* SDAM_REG_PATTERN_CONFIG */
+#define SDAM_PATTERN_LOOP_ENABLE		BIT(3)
+#define SDAM_PATTERN_RAMP_TOGGLE		BIT(2)
+#define SDAM_PATTERN_EN_PAUSE_END		BIT(1)
+#define SDAM_PATTERN_EN_PAUSE_START		BIT(0)
+
+/* SDAM_REG_PAUSE_MULTIPLIER */
+#define SDAM_PAUSE_START_SHIFT			4
+#define SDAM_PAUSE_START_MASK			GENMASK(7, 4)
+#define SDAM_PAUSE_END_MASK			GENMASK(3, 0)
+
+#define SDAM_LUT_COUNT_MAX			64
+
 enum lpg_src {
 	LUT_PATTERN = 0,
 	PWM_VALUE,
@@ -151,6 +181,7 @@
 	u32				lpg_idx;
 	u32				reg_base;
 	u32				max_pattern_length;
+	u32				lpg_sdam_base;
 	u8				src_sel;
 	u8				subtype;
 	bool				lut_written;
@@ -165,7 +196,11 @@
 	struct qpnp_lpg_channel	*lpgs;
 	struct qpnp_lpg_lut	*lut;
 	struct mutex		bus_lock;
+	struct nvmem_device	*sdam_nvmem;
+	struct device_node	*pbs_dev_node;
 	u32			num_lpgs;
+	unsigned long		pbs_en_bitmap;
+	bool			use_sdam;
 };
 
 static int qpnp_lpg_read(struct qpnp_lpg_channel *lpg, u16 addr, u8 *val)
@@ -192,7 +227,7 @@
 	mutex_lock(&lpg->chip->bus_lock);
 	rc = regmap_write(lpg->chip->regmap, lpg->reg_base + addr, val);
 	if (rc < 0)
-		dev_err(lpg->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+		dev_err(lpg->chip->dev, "Write addr 0x%x with value 0x%x failed, rc=%d\n",
 				lpg->reg_base + addr, val, rc);
 	mutex_unlock(&lpg->chip->bus_lock);
 
@@ -245,6 +280,90 @@
 	return rc;
 }
 
+static int qpnp_sdam_write(struct qpnp_lpg_chip *chip, u16 addr, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem, addr, 1, &val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM add 0x%x failed, rc=%d\n",
+				addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lpg_sdam_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
+{
+	struct qpnp_lpg_chip *chip = lpg->chip;
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM add 0x%x failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lpg_sdam_masked_write(struct qpnp_lpg_channel *lpg,
+					u16 addr, u8 mask, u8 val)
+{
+	int rc;
+	u8 tmp;
+	struct qpnp_lpg_chip *chip = lpg->chip;
+
+	mutex_lock(&chip->bus_lock);
+
+	rc = nvmem_device_read(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &tmp);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read SDAM addr %d failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+		goto unlock;
+	}
+
+	tmp = tmp & ~mask;
+	tmp |= val & mask;
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lpg->lpg_sdam_base + addr, 1, &tmp);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr %d failed, rc=%d\n",
+				lpg->lpg_sdam_base + addr, rc);
+
+unlock:
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
+static int qpnp_lut_sdam_write(struct qpnp_lpg_lut *lut,
+		u16 addr, u8 *val, size_t length)
+{
+	struct qpnp_lpg_chip *chip = lut->chip;
+	int rc;
+
+	if (addr >= SDAM_LUT_COUNT_MAX)
+		return -EINVAL;
+
+	mutex_lock(&chip->bus_lock);
+	rc = nvmem_device_write(chip->sdam_nvmem,
+			lut->reg_base + addr, length, val);
+	if (rc < 0)
+		dev_err(chip->dev, "write SDAM addr %d failed, rc=%d\n",
+				lut->reg_base + addr, rc);
+
+	mutex_unlock(&chip->bus_lock);
+
+	return rc > 0 ? 0 : rc;
+}
+
 static struct qpnp_lpg_channel *pwm_dev_to_qpnp_lpg(struct pwm_chip *pwm_chip,
 				struct pwm_device *pwm) {
 
@@ -365,14 +484,111 @@
 	return rc;
 }
 
-static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+static int qpnp_lpg_set_sdam_lut_pattern(struct qpnp_lpg_channel *lpg,
 		unsigned int *pattern, unsigned int length)
 {
 	struct qpnp_lpg_lut *lut = lpg->chip->lut;
 	int i, rc = 0;
-	u16 full_duty_value, pwm_values[LPG_LUT_COUNT_MAX + 1] = {0};
+	u8 val[SDAM_LUT_COUNT_MAX + 1], addr;
+
+	if (length > lpg->max_pattern_length) {
+		dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
+				length, lpg->max_pattern_length);
+		return -EINVAL;
+	}
+
+	/* Program LUT pattern */
+	mutex_lock(&lut->lock);
+	addr = lpg->ramp_config.lo_idx;
+	for (i = 0; i < length; i++)
+		val[i] = pattern[i] * 255 / 100;
+
+	rc = qpnp_lut_sdam_write(lut, addr, val, length);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write pattern in SDAM failed, rc=%d",
+				rc);
+		goto unlock;
+	}
+
+	lpg->ramp_config.pattern_length = length;
+unlock:
+	mutex_unlock(&lut->lock);
+
+	return rc;
+}
+
+static int qpnp_lpg_set_sdam_ramp_config(struct qpnp_lpg_channel *lpg)
+{
+	struct lpg_ramp_config *ramp = &lpg->ramp_config;
+	u8 addr, mask, val;
+	int rc = 0;
+
+	/* clear the PBS scratchpad register */
+	val = 0;
+	rc = qpnp_lpg_sdam_write(lpg,
+			SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Set ramp step duration, one WAIT_TICK is 7.8ms */
+	val = (ramp->step_ms * 1000 / 7800) & 0xff;
+	if (val > 0)
+		val--;
+	addr = SDAM_REG_RAMP_STEP_DURATION;
+	rc = qpnp_sdam_write(lpg->chip, addr, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_RAMP_STEP_DURATION failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	/* Set hi_idx and lo_idx */
+	rc = qpnp_lpg_sdam_write(lpg, SDAM_END_INDEX_OFFSET, ramp->hi_idx);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_END_INDEX failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	rc = qpnp_lpg_sdam_write(lpg, SDAM_START_INDEX_OFFSET,
+						ramp->lo_idx);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_START_INDEX failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	/* Set LPG_PATTERN_CONFIG */
+	addr = SDAM_PATTERN_CONFIG_OFFSET;
+	mask = SDAM_PATTERN_LOOP_ENABLE;
+	val = 0;
+	if (ramp->pattern_repeat)
+		val |= SDAM_PATTERN_LOOP_ENABLE;
+
+	rc = qpnp_lpg_sdam_masked_write(lpg, addr, mask, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write SDAM_REG_PATTERN_CONFIG failed, rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+		unsigned int *pattern, unsigned int length)
+{
+	struct qpnp_lpg_lut *lut = lpg->chip->lut;
+	u16 full_duty_value, pwm_values[SDAM_LUT_COUNT_MAX + 1] = {0};
+	int i, rc = 0;
 	u8 lsb, msb, addr;
 
+	if (lpg->chip->use_sdam)
+		return qpnp_lpg_set_sdam_lut_pattern(lpg, pattern, length);
+
 	if (length > lpg->max_pattern_length) {
 		dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
 				length, lpg->max_pattern_length);
@@ -426,6 +642,9 @@
 	u8 lsb, msb, addr, mask, val;
 	int rc = 0;
 
+	if (lpg->chip->use_sdam)
+		return qpnp_lpg_set_sdam_ramp_config(lpg);
+
 	/* Set ramp step duration */
 	lsb = ramp->step_ms & 0xff;
 	msb = ramp->step_ms >> 8;
@@ -507,6 +726,8 @@
 static void __qpnp_lpg_calc_pwm_period(int period_ns,
 			struct lpg_pwm_config *pwm_config)
 {
+	struct qpnp_lpg_channel *lpg = container_of(pwm_config,
+			struct qpnp_lpg_channel, pwm_config);
 	struct lpg_pwm_config configs[NUM_PWM_SIZE];
 	int i, j, m, n;
 	int tmp1, tmp2;
@@ -522,7 +743,12 @@
 	 *
 	 * Searching the closest settings for the requested PWM period.
 	 */
-	for (n = 0; n < ARRAY_SIZE(pwm_size); n++) {
+	if (lpg->chip->use_sdam)
+		/* SDAM pattern control can only use 9 bit resolution */
+		n = 1;
+	else
+		n = 0;
+	for (; n < ARRAY_SIZE(pwm_size); n++) {
 		pwm_clk_period_ns = period_ns >> pwm_size[n];
 		for (i = ARRAY_SIZE(clk_freq_hz) - 1; i >= 0; i--) {
 			for (j = 0; j < ARRAY_SIZE(clk_prediv); j++) {
@@ -654,6 +880,45 @@
 	return rc;
 }
 
+static int qpnp_lpg_pbs_trigger_enable(struct qpnp_lpg_channel *lpg, bool en)
+{
+	struct qpnp_lpg_chip *chip = lpg->chip;
+	int rc = 0;
+
+	if (en) {
+		if (chip->pbs_en_bitmap == 0) {
+			rc = qpnp_sdam_write(chip, SDAM_REG_PBS_SEQ_EN,
+					PBS_SW_TRG_BIT);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_PBS_SEQ_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			rc = qpnp_pbs_trigger_event(chip->pbs_dev_node,
+					PBS_SW_TRG_BIT);
+			if (rc < 0) {
+				dev_err(chip->dev, "Failed to trigger PBS, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+		set_bit(lpg->lpg_idx, &chip->pbs_en_bitmap);
+	} else {
+		clear_bit(lpg->lpg_idx, &chip->pbs_en_bitmap);
+		if (chip->pbs_en_bitmap == 0) {
+			rc = qpnp_sdam_write(chip, SDAM_REG_PBS_SEQ_EN, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_PBS_SEQ_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en)
 {
 	struct qpnp_lpg_chip *chip = lpg->chip;
@@ -665,7 +930,7 @@
 					LPG_EN_RAMP_GEN_MASK;
 	val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT;
 
-	if (lpg->src_sel == LUT_PATTERN)
+	if (lpg->src_sel == LUT_PATTERN && !chip->use_sdam)
 		val |= 1 << LPG_EN_RAMP_GEN_SHIFT;
 
 	if (en)
@@ -678,6 +943,27 @@
 		return rc;
 	}
 
+	if (chip->use_sdam) {
+		if (lpg->src_sel == LUT_PATTERN && en) {
+			val = SDAM_LUT_EN_BIT;
+			en = true;
+		} else {
+			val = 0;
+			en = false;
+		}
+
+		rc = qpnp_lpg_sdam_write(lpg, SDAM_LUT_EN_OFFSET, val);
+		if (rc < 0) {
+			dev_err(chip->dev, "Write SDAM_REG_LUT_EN failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		qpnp_lpg_pbs_trigger_enable(lpg, en);
+
+		return rc;
+	}
+
 	if (lpg->src_sel == LUT_PATTERN && en) {
 		mutex_lock(&lut->lock);
 		val = 1 << lpg->lpg_idx;
@@ -697,6 +983,7 @@
 	struct qpnp_lpg_channel *lpg;
 	enum lpg_src src_sel;
 	int rc;
+	bool is_enabled;
 
 	lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
 	if (lpg == NULL) {
@@ -714,6 +1001,23 @@
 	if (src_sel == lpg->src_sel)
 		return 0;
 
+	is_enabled = pwm_is_enabled(pwm);
+	if (is_enabled) {
+		/*
+		 * Disable the channel first, then re-enable it, to make
+		 * sure the output type change takes effect. This is
+		 * especially useful in the SDAM use case, to stop the PBS
+		 * sequence when changing the PWM output type from
+		 * MODULATED to FIXED.
+		 */
+		rc = qpnp_lpg_pwm_src_enable(lpg, false);
+		if (rc < 0) {
+			dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+					lpg->lpg_idx, rc);
+			return rc;
+		}
+	}
+
 	if (src_sel == LUT_PATTERN) {
 		/* program LUT if it's never been programmed */
 		if (!lpg->lut_written) {
@@ -738,7 +1042,14 @@
 
 	lpg->src_sel = src_sel;
 
-	if (pwm_is_enabled(pwm)) {
+	if (is_enabled) {
+		rc = qpnp_lpg_set_pwm_config(lpg);
+		if (rc < 0) {
+			dev_err(pwm_chip->dev, "Config PWM failed for channel %d, rc=%d\n",
+							lpg->lpg_idx, rc);
+			return rc;
+		}
+
 		rc = qpnp_lpg_pwm_src_enable(lpg, true);
 		if (rc < 0) {
 			dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
@@ -979,7 +1290,7 @@
 	struct qpnp_lpg_channel *lpg;
 	struct lpg_ramp_config *ramp;
 	int rc = 0, i;
-	u32 base, length, lpg_chan_id, tmp;
+	u32 base, length, lpg_chan_id, tmp, max_count;
 	const __be32 *addr;
 
 	addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
@@ -1010,18 +1321,47 @@
 		}
 	}
 
-	addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
-	if (!addr) {
-		pr_debug("NO LUT address assigned\n");
-		return 0;
-	}
-
 	chip->lut = devm_kmalloc(chip->dev, sizeof(*chip->lut), GFP_KERNEL);
 	if (!chip->lut)
 		return -ENOMEM;
 
+	chip->sdam_nvmem = devm_nvmem_device_get(chip->dev, "ppg_sdam");
+	if (IS_ERR_OR_NULL(chip->sdam_nvmem)) {
+		if (PTR_ERR(chip->sdam_nvmem) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
+		if (!addr) {
+			pr_debug("NO LUT address assigned\n");
+			devm_kfree(chip->dev, chip->lut);
+			chip->lut = NULL;
+			return 0;
+		}
+
+		chip->lut->reg_base = be32_to_cpu(*addr);
+		max_count = LPG_LUT_COUNT_MAX;
+	} else {
+		chip->use_sdam = true;
+		chip->pbs_dev_node = of_parse_phandle(chip->dev->of_node,
+				"qcom,pbs-client", 0);
+		if (!chip->pbs_dev_node) {
+			dev_err(chip->dev, "Missing qcom,pbs-client property\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(chip->dev->of_node,
+				"qcom,lut-sdam-base",
+				&chip->lut->reg_base);
+		if (rc < 0) {
+			dev_err(chip->dev, "Read qcom,lut-sdam-base failed, rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		max_count = SDAM_LUT_COUNT_MAX;
+	}
+
 	chip->lut->chip = chip;
-	chip->lut->reg_base = be32_to_cpu(*addr);
 	mutex_init(&chip->lut->lock);
 
 	rc = of_property_count_elems_of_size(chip->dev->of_node,
@@ -1033,13 +1373,13 @@
 	}
 
 	length = rc;
-	if (length > LPG_LUT_COUNT_MAX) {
+	if (length > max_count) {
 		dev_err(chip->dev, "qcom,lut-patterns length %d exceed max %d\n",
-				length, LPG_LUT_COUNT_MAX);
+				length, max_count);
 		return -EINVAL;
 	}
 
-	chip->lut->pattern = devm_kcalloc(chip->dev, LPG_LUT_COUNT_MAX,
+	chip->lut->pattern = devm_kcalloc(chip->dev, max_count,
 			sizeof(*chip->lut->pattern), GFP_KERNEL);
 	if (!chip->lut->pattern)
 		return -ENOMEM;
@@ -1066,12 +1406,24 @@
 			return rc;
 		}
 
-		if (lpg_chan_id > chip->num_lpgs) {
+		if (lpg_chan_id < 1 || lpg_chan_id > chip->num_lpgs) {
 			dev_err(chip->dev, "lpg-chann-id %d is out of range 1~%d\n",
 					lpg_chan_id, chip->num_lpgs);
 			return -EINVAL;
 		}
 
+		if (chip->use_sdam) {
+			rc = of_property_read_u32(child,
+					"qcom,lpg-sdam-base",
+					&tmp);
+			if (rc < 0) {
+				dev_err(chip->dev, "get qcom,lpg-sdam-base failed for lpg%d, rc=%d\n",
+						lpg_chan_id, rc);
+				return rc;
+			}
+			chip->lpgs[lpg_chan_id - 1].lpg_sdam_base = tmp;
+		}
+
 		/* lpg channel id is indexed from 1 in hardware */
 		lpg = &chip->lpgs[lpg_chan_id - 1];
 		ramp = &lpg->ramp_config;
@@ -1091,9 +1443,9 @@
 			return rc;
 		}
 		ramp->lo_idx = (u8)tmp;
-		if (ramp->lo_idx >= LPG_LUT_COUNT_MAX) {
+		if (ramp->lo_idx >= max_count) {
 			dev_err(chip->dev, "qcom,ramp-low-index should less than max %d\n",
-					LPG_LUT_COUNT_MAX);
+						max_count);
 			return -EINVAL;
 		}
 
@@ -1105,14 +1457,14 @@
 		}
 		ramp->hi_idx = (u8)tmp;
 
-		if (ramp->hi_idx > LPG_LUT_COUNT_MAX) {
+		if (ramp->hi_idx > max_count) {
 			dev_err(chip->dev, "qcom,ramp-high-index shouldn't exceed max %d\n",
-						LPG_LUT_COUNT_MAX);
+						max_count);
 			return -EINVAL;
 		}
 
-		if (ramp->hi_idx <= ramp->lo_idx) {
-			dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d)\n",
+		if (chip->use_sdam && ramp->hi_idx <= ramp->lo_idx) {
+			dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d) when SDAM used\n",
 						ramp->hi_idx, ramp->lo_idx);
 			return -EINVAL;
 		}
@@ -1121,6 +1473,12 @@
 		ramp->pattern = &chip->lut->pattern[ramp->lo_idx];
 		lpg->max_pattern_length = ramp->pattern_length;
 
+		ramp->pattern_repeat = of_property_read_bool(child,
+				"qcom,ramp-pattern-repeat");
+
+		if (chip->use_sdam)
+			continue;
+
 		rc = of_property_read_u32(child,
 				"qcom,ramp-pause-hi-count", &tmp);
 		if (rc < 0)
@@ -1138,9 +1496,6 @@
 		ramp->ramp_dir_low_to_hi = of_property_read_bool(child,
 				"qcom,ramp-from-low-to-high");
 
-		ramp->pattern_repeat = of_property_read_bool(child,
-				"qcom,ramp-pattern-repeat");
-
 		ramp->toggle =  of_property_read_bool(child,
 				"qcom,ramp-toggle");
 	}
@@ -1148,6 +1503,36 @@
 	return 0;
 }
 
+static int qpnp_lpg_sdam_hw_init(struct qpnp_lpg_chip *chip)
+{
+	struct qpnp_lpg_channel *lpg;
+	int i, rc = 0;
+
+	if (!chip->use_sdam)
+		return 0;
+
+	for (i = 0; i < chip->num_lpgs; i++) {
+		lpg = &chip->lpgs[i];
+		if (lpg->lpg_sdam_base != 0) {
+			rc = qpnp_lpg_sdam_write(lpg, SDAM_LUT_EN_OFFSET, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Write SDAM_REG_LUT_EN failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+			rc = qpnp_lpg_sdam_write(lpg,
+					SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET, 0);
+			if (rc < 0) {
+				dev_err(lpg->chip->dev, "Write SDAM_REG_PBS_SCRATCH_LUT_COUNTER failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int qpnp_lpg_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -1172,6 +1557,13 @@
 		goto err_out;
 	}
 
+	rc = qpnp_lpg_sdam_hw_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "SDAM HW init failed, rc=%d\n",
+				rc);
+		goto err_out;
+	}
+
 	dev_set_drvdata(chip->dev, chip);
 	chip->pwm_chip.dev = chip->dev;
 	chip->pwm_chip.base = -1;
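
Two small conversions drive the SDAM path added above: the ramp step duration is expressed in ~7.8 ms WAIT_TICKs with the register holding (ticks - 1), and LUT entries store the duty-cycle percentage scaled to 8 bits. A user-space sketch of both conversions, assuming the same constants as the driver (illustrative only):

#include <stdio.h>

/* One SDAM WAIT_TICK is ~7.8 ms; SDAM_REG_RAMP_STEP_DURATION holds ticks - 1. */
static unsigned char sdam_step_ms_to_reg(unsigned int step_ms)
{
	unsigned char val = (step_ms * 1000 / 7800) & 0xff;

	if (val > 0)
		val--;
	return val;
}

/* SDAM LUT entries are duty-cycle percentages scaled to an 8-bit value. */
static unsigned char sdam_pattern_pct_to_reg(unsigned int pct)
{
	return pct * 255 / 100;
}

int main(void)
{
	/* 100 ms step -> 12 ticks -> register value 11; 50% duty -> 127 */
	printf("%u %u\n", sdam_step_ms_to_reg(100), sdam_pattern_pct_to_reg(50));
	return 0;
}
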
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 83e89e5..b73a237 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -268,8 +268,7 @@
 	drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
 	if (drvdata->desc.name == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate supply name\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 
 	if (config->nr_gpios != 0) {
@@ -289,7 +288,7 @@
 				dev_err(&pdev->dev,
 					"Could not obtain regulator setting GPIOs: %d\n",
 					ret);
-			goto err_memstate;
+			goto err_memgpio;
 		}
 	}
 
@@ -300,7 +299,7 @@
 	if (drvdata->states == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate state data\n");
 		ret = -ENOMEM;
-		goto err_memgpio;
+		goto err_stategpio;
 	}
 	drvdata->nr_states = config->nr_states;
 
@@ -321,7 +320,7 @@
 	default:
 		dev_err(&pdev->dev, "No regulator type set\n");
 		ret = -EINVAL;
-		goto err_memgpio;
+		goto err_memstate;
 	}
 
 	/* build initial state from gpio init data. */
@@ -358,22 +357,21 @@
 	if (IS_ERR(drvdata->dev)) {
 		ret = PTR_ERR(drvdata->dev);
 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-		goto err_stategpio;
+		goto err_memstate;
 	}
 
 	platform_set_drvdata(pdev, drvdata);
 
 	return 0;
 
-err_stategpio:
-	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
 err_memstate:
 	kfree(drvdata->states);
+err_stategpio:
+	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
 err_memgpio:
 	kfree(drvdata->gpios);
 err_name:
 	kfree(drvdata->desc.name);
-err:
 	return ret;
 }
 
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 4f613ec..037675b 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -282,6 +282,7 @@
 				dev_err(dev,
 					"failed to parse DT for regulator %s\n",
 					child->name);
+				of_node_put(child);
 				return -EINVAL;
 			}
 			match->of_node = of_node_get(child);
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index 07a6198..cc01399 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -167,6 +167,8 @@
 #define PM660_BST_HEADROOM_DEFAULT_MV	200
 #define BST_HEADROOM_DEFAULT_MV		150
 
+#define PMIC5_LCDB_OFF_ON_DELAY_US	20000
+
 struct ldo_regulator {
 	struct regulator_desc		rdesc;
 	struct regulator_dev		*rdev;
@@ -1340,22 +1342,27 @@
 
 static int qpnp_lcdb_regulator_register(struct qpnp_lcdb *lcdb, u8 type)
 {
-	int rc = 0;
+	int rc = 0, off_on_delay = 0;
 	struct regulator_init_data *init_data;
 	struct regulator_config cfg = {};
 	struct regulator_desc *rdesc;
 	struct regulator_dev *rdev;
 	struct device_node *node;
 
+	if (lcdb->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE)
+		off_on_delay = PMIC5_LCDB_OFF_ON_DELAY_US;
+
 	if (type == LDO) {
 		node			= lcdb->ldo.node;
 		rdesc			= &lcdb->ldo.rdesc;
 		rdesc->ops		= &qpnp_lcdb_ldo_ops;
+		rdesc->off_on_delay	= off_on_delay;
 		rdev			= lcdb->ldo.rdev;
 	} else if (type == NCP) {
 		node			= lcdb->ncp.node;
 		rdesc			= &lcdb->ncp.rdesc;
 		rdesc->ops		= &qpnp_lcdb_ncp_ops;
+		rdesc->off_on_delay	= off_on_delay;
 		rdev			= lcdb->ncp.rdev;
 	} else {
 		pr_err("Invalid regulator type %d\n", type);
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e1cfa06..e79f2a1 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -49,6 +49,11 @@
 
 	tv64.tv_sec = rtc_tm_to_time64(&tm);
 
+#if BITS_PER_LONG == 32
+	if (tv64.tv_sec > INT_MAX)
+		goto err_read;
+#endif
+
 	err = do_settimeofday64(&tv64);
 
 	dev_info(rtc->dev.parent,
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index aa53fce..180ec1f 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@
 
 static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 {
-	long rc = OPAL_BUSY;
+	s64 rc = OPAL_BUSY;
 	int retries = 10;
 	u32 y_m_d;
 	u64 h_m_s_ms;
@@ -66,13 +66,17 @@
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
-		else if (retries-- && (rc == OPAL_HARDWARE
-				       || rc == OPAL_INTERNAL_ERROR))
-			msleep(10);
-		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
-			break;
+		} else if (rc == OPAL_BUSY) {
+			msleep(OPAL_BUSY_DELAY_MS);
+		} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+			if (retries--) {
+				msleep(10); /* Wait 10ms before retry */
+				rc = OPAL_BUSY; /* go around again */
+			}
+		}
 	}
 
 	if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@
 
 static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
 {
-	long rc = OPAL_BUSY;
+	s64 rc = OPAL_BUSY;
 	int retries = 10;
 	u32 y_m_d = 0;
 	u64 h_m_s_ms = 0;
 
 	tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_write(y_m_d, h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
+		if (rc == OPAL_BUSY_EVENT) {
+			msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
-		else if (retries-- && (rc == OPAL_HARDWARE
-				       || rc == OPAL_INTERNAL_ERROR))
-			msleep(10);
-		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
-			break;
+		} else if (rc == OPAL_BUSY) {
+			msleep(OPAL_BUSY_DELAY_MS);
+		} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+			if (retries--) {
+				msleep(10); /* Wait 10ms before retry */
+				rc = OPAL_BUSY; /* go around again */
+			}
+		}
 	}
 
 	return rc == OPAL_SUCCESS ? 0 : -EIO;
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index a753ef9..3e8fd33 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -132,20 +132,23 @@
 {
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
 	unsigned long time;
+	int ret;
 
 	rtc_tm_to_time(tm, &time);
 
 	/* Disable RTC first */
-	snvs_rtc_enable(data, false);
+	ret = snvs_rtc_enable(data, false);
+	if (ret)
+		return ret;
 
 	/* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
 	regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
 	regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
 
 	/* Enable RTC again */
-	snvs_rtc_enable(data, true);
+	ret = snvs_rtc_enable(data, true);
 
-	return 0;
+	return ret;
 }
 
 static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -287,7 +290,11 @@
 	regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
 
 	/* Enable RTC */
-	snvs_rtc_enable(data, true);
+	ret = snvs_rtc_enable(data, true);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
+		goto error_rtc_device_register;
+	}
 
 	device_init_wakeup(&pdev->dev, true);
 
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 560d9a5..a952808 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -86,7 +86,8 @@
 	for (i = 2; i < 6; i++)
 		buf[i] = __raw_readl(&rtcreg->dat);
 	spin_unlock_irq(&pdata->lock);
-	sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
+	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
+		(buf[3] << 8) | buf[2];
 	rtc_time_to_tm(sec, tm);
 	return rtc_valid_tm(tm);
 }
@@ -147,7 +148,8 @@
 	alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
 	alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
 	spin_unlock_irq(&pdata->lock);
-	sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
+	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
+		(buf[3] << 8) | buf[2];
 	rtc_time_to_tm(sec, &alrm->time);
 	return rtc_valid_tm(&alrm->time);
 }
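
The rtc-tx4939 change above exists because buf[] holds 8-bit values that get promoted to int: shifting the top byte left by 24 can overflow a signed 32-bit int (undefined behaviour in C) and in practice sign-extends when the result is widened. Widening to unsigned long before the shift keeps the arithmetic unsigned. A small demonstration of the difference on a 64-bit build (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned char buf[] = { 0x00, 0x00, 0x12, 0x34, 0x56, 0xff };
	unsigned long bad, good;

	/* buf[5] promotes to int; 0xff << 24 lands in the sign bit, and the
	 * negative int is sign-extended when converted to unsigned long
	 * (the exact result is not guaranteed by the standard). */
	bad = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];

	/* Widening first keeps the whole expression in unsigned arithmetic. */
	good = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
		(buf[3] << 8) | buf[2];

	printf("bad=%lx good=%lx\n", bad, good);
	return 0;
}
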
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 1e56018..e453d2a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -591,13 +591,22 @@
 int dasd_alias_add_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
-	struct alias_lcu *lcu;
+	__u8 uaddr = private->uid.real_unit_addr;
+	struct alias_lcu *lcu = private->lcu;
 	unsigned long flags;
 	int rc;
 
-	lcu = private->lcu;
 	rc = 0;
 	spin_lock_irqsave(&lcu->lock, flags);
+	/*
+	 * Check if device and lcu type differ. If so, the uac data may be
+	 * outdated and needs to be updated.
+	 */
+	if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
+		lcu->flags |= UPDATE_PENDING;
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "uid type mismatch - trigger rescan");
+	}
 	if (!(lcu->flags & UPDATE_PENDING)) {
 		rc = _add_device_to_lcu(lcu, device, device);
 		if (rc)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 41e28b2..8ac27ef 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
 # S/390 character devices
 #
 
+CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_EXPOLINE)
+
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
 	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
 	 sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1167469..67903c9 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@
 
 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
 {
+	struct channel_path *chp;
 	struct chp_link link;
 	struct chp_id chpid;
 	int status;
@@ -463,10 +464,17 @@
 	chpid.id = sei_area->rsid;
 	/* allocate a new channel path structure, if needed */
 	status = chp_get_status(chpid);
-	if (status < 0)
-		chp_new(chpid);
-	else if (!status)
+	if (!status)
 		return;
+
+	if (status < 0) {
+		chp_new(chpid);
+	} else {
+		chp = chpid_to_chp(chpid);
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+	}
 	memset(&link, 0, sizeof(struct chp_link));
 	link.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8327d47..c46e31e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -822,6 +822,7 @@
 
 	ccw_device_set_timeout(cdev, 0);
 	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -ETIMEDOUT;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
@@ -898,7 +899,7 @@
 	/* OK, i/o is dead now. Call interrupt handler. */
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
+			      ERR_PTR(cdev->private->async_kill_io_rc));
 }
 
 static void
@@ -915,14 +916,16 @@
 	ccw_device_online_verify(cdev, 0);
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
+			      ERR_PTR(cdev->private->async_kill_io_rc));
 }
 
 void ccw_device_kill_io(struct ccw_device *cdev)
 {
 	int ret;
 
+	ccw_device_set_timeout(cdev, 0);
 	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -EIO;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
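
The device_fsm.c hunks above stop hard-coding -EIO in the kill-I/O completion paths: the reason is stored in async_kill_io_rc when the cancel is started (-ETIMEDOUT from the timeout path, -EIO from ccw_device_kill_io) and reported to the driver's handler once the halt/clear completes. A minimal sketch of that record-then-report pattern (struct req_stub and the helper names are illustrative, not the CIO interfaces):

/* Record the cancel reason up front so the asynchronous completion path
 * can report -ETIMEDOUT vs. -EIO instead of a hard-coded value.
 */
#include <stdio.h>
#include <errno.h>

struct req_stub {
	int async_kill_rc;	/* reason recorded at cancel time */
	void (*handler)(int rc);
};

static void handler_stub(int rc)
{
	printf("completion handler sees rc = %d\n", rc);
}

static void start_cancel(struct req_stub *req, int reason)
{
	req->async_kill_rc = reason;	/* -ETIMEDOUT or -EIO */
	/* the actual halt/clear would be issued here */
}

static void cancel_done(struct req_stub *req)
{
	req->handler(req->async_kill_rc);	/* not hard-coded -EIO */
}

int main(void)
{
	struct req_stub req = { .handler = handler_stub };

	start_cancel(&req, -ETIMEDOUT);	/* timeout-triggered kill */
	cancel_done(&req);
	start_cancel(&req, -EIO);	/* explicit kill */
	cancel_done(&req);
	return 0;
}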
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 877d9f6..85b2896 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -158,7 +158,7 @@
 }
 
 /**
- * ccw_device_start_key() - start a s390 channel program with key
+ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
  * @cdev: target ccw device
  * @cpa: logical start address of channel program
  * @intparm: user specific interruption parameter; will be presented back to
@@ -169,10 +169,15 @@
  * @key: storage key to be used for the I/O
  * @flags: additional flags; defines the action to be performed for I/O
  *	   processing.
+ * @expires: timeout value in jiffies
  *
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -181,9 +186,9 @@
  * Context:
  *  Interrupts disabled, ccw device lock held
  */
-int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
-			 unsigned long intparm, __u8 lpm, __u8 key,
-			 unsigned long flags)
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+				 unsigned long intparm, __u8 lpm, __u8 key,
+				 unsigned long flags, int expires)
 {
 	struct subchannel *sch;
 	int ret;
@@ -223,6 +228,8 @@
 	switch (ret) {
 	case 0:
 		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
 		break;
 	case -EACCES:
 	case -ENODEV:
@@ -233,7 +240,7 @@
 }
 
 /**
- * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
+ * ccw_device_start_key() - start a s390 channel program with key
  * @cdev: target ccw device
  * @cpa: logical start address of channel program
  * @intparm: user specific interruption parameter; will be presented back to
@@ -244,15 +251,10 @@
  * @key: storage key to be used for the I/O
  * @flags: additional flags; defines the action to be performed for I/O
  *	   processing.
- * @expires: timeout value in jiffies
  *
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
- * This function notifies the device driver if the channel program has not
- * completed during the time specified by @expires. If a timeout occurs, the
- * channel program is terminated via xsch, hsch or csch, and the device's
- * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -261,19 +263,12 @@
  * Context:
  *  Interrupts disabled, ccw device lock held
  */
-int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
-				 unsigned long intparm, __u8 lpm, __u8 key,
-				 unsigned long flags, int expires)
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+			 unsigned long intparm, __u8 lpm, __u8 key,
+			 unsigned long flags)
 {
-	int ret;
-
-	if (!cdev)
-		return -ENODEV;
-	ccw_device_set_timeout(cdev, expires);
-	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
-	if (ret != 0)
-		ccw_device_set_timeout(cdev, 0);
-	return ret;
+	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
+					    flags, 0);
 }
 
 /**
@@ -488,18 +483,20 @@
 EXPORT_SYMBOL(ccw_device_get_id);
 
 /**
- * ccw_device_tm_start_key() - perform start function
+ * ccw_device_tm_start_timeout_key() - perform start function
  * @cdev: ccw device on which to perform the start function
  * @tcw: transport-command word to be started
  * @intparm: user defined parameter to be passed to the interrupt handler
  * @lpm: mask of paths to use
  * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
  *
  * Start the tcw on the given ccw device. Return zero on success, non-zero
  * otherwise.
  */
-int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
-			    unsigned long intparm, u8 lpm, u8 key)
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+				    unsigned long intparm, u8 lpm, u8 key,
+				    int expires)
 {
 	struct subchannel *sch;
 	int rc;
@@ -526,37 +523,32 @@
 			return -EACCES;
 	}
 	rc = cio_tm_start_key(sch, tcw, lpm, key);
-	if (rc == 0)
+	if (rc == 0) {
 		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
+	}
 	return rc;
 }
-EXPORT_SYMBOL(ccw_device_tm_start_key);
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
 
 /**
- * ccw_device_tm_start_timeout_key() - perform start function
+ * ccw_device_tm_start_key() - perform start function
  * @cdev: ccw device on which to perform the start function
  * @tcw: transport-command word to be started
  * @intparm: user defined parameter to be passed to the interrupt handler
  * @lpm: mask of paths to use
  * @key: storage key to use for storage access
- * @expires: time span in jiffies after which to abort request
  *
  * Start the tcw on the given ccw device. Return zero on success, non-zero
  * otherwise.
  */
-int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
-				    unsigned long intparm, u8 lpm, u8 key,
-				    int expires)
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+			    unsigned long intparm, u8 lpm, u8 key)
 {
-	int ret;
-
-	ccw_device_set_timeout(cdev, expires);
-	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
-	if (ret != 0)
-		ccw_device_set_timeout(cdev, 0);
-	return ret;
+	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
 }
-EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_tm_start_key);
 
 /**
  * ccw_device_tm_start() - perform start function
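
The device_ops.c refactor above inverts the old wrapper direction: the timeout-capable variants now carry the real implementation and arm the timer only after the start succeeded, while ccw_device_start_key()/ccw_device_tm_start_key() become thin wrappers passing expires == 0. A generic sketch of that shape (my_start*, arm_timer and hw_start are illustrative names, not the s390 API):

/* The no-timeout entry point is just the timeout variant with
 * expires == 0; the timer is armed only after a successful start.
 */
#include <stdio.h>

struct device_stub { int timer_ms; };

static void arm_timer(struct device_stub *dev, int ms)
{
	dev->timer_ms = ms;	/* would start a real timer here */
}

static int hw_start(struct device_stub *dev)
{
	(void)dev;
	return 0;		/* pretend the channel program started */
}

static int my_start_timeout(struct device_stub *dev, int expires)
{
	int rc = hw_start(dev);

	if (rc == 0 && expires)
		arm_timer(dev, expires);	/* only on success */
	return rc;
}

static int my_start(struct device_stub *dev)
{
	return my_start_timeout(dev, 0);	/* 0 == no timeout */
}

int main(void)
{
	struct device_stub dev = { 0 };

	my_start(&dev);
	printf("timer after plain start: %d ms\n", dev.timer_ms);
	my_start_timeout(&dev, 3000);
	printf("timer after timeout start: %d ms\n", dev.timer_ms);
	return 0;
}

Arming the timer only after a successful start also removes the need to cancel it again on every early error return, which is what the removed wrappers had to do.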
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 220f491..1d98434 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -154,6 +154,7 @@
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
 	struct irb irb;		/* device status */
+	int async_kill_io_rc;
 	struct senseid senseid;	/* SenseID info */
 	struct pgid pgid[8];	/* path group IDs per chpid*/
 	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866..3528690 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,7 +140,7 @@
 	int i;
 
 	for (i = 0; i < nr_queues; i++) {
-		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+		q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
 		if (!q)
 			return -ENOMEM;
 
@@ -456,7 +456,6 @@
 {
 	struct ciw *ciw;
 	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
-	int rc;
 
 	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
 	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@
 	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
 	if (!ciw) {
 		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
-		rc = -EINVAL;
-		goto out_err;
+		return -EINVAL;
 	}
 	irq_ptr->equeue = *ciw;
 
 	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
 	if (!ciw) {
 		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
-		rc = -EINVAL;
-		goto out_err;
+		return -EINVAL;
 	}
 	irq_ptr->aqueue = *ciw;
 
@@ -510,9 +507,6 @@
 	irq_ptr->orig_handler = init_data->cdev->handler;
 	init_data->cdev->handler = qdio_int_handler;
 	return 0;
-out_err:
-	qdio_release_memory(irq_ptr);
-	return rc;
 }
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
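
The qdio_setup.c hunk above drops the out_err path that freed the qdio memory inside the setup helper; the callee now just returns -EINVAL and leaves cleanup to its caller, so the same object is neither released twice nor used after it is gone. A small sketch of that single-owner rule (setup_irq_stub/teardown_stub are illustrative names only):

/* When the caller allocates and cleans up on failure, the callee must
 * not free the object on its own error paths.
 */
#include <stdlib.h>
#include <stdio.h>

struct irq_stub { int configured; };

static int setup_irq_stub(struct irq_stub *irq, int fail)
{
	if (fail)
		return -1;	/* report the error, leave irq intact */
	irq->configured = 1;
	return 0;
}

static void teardown_stub(struct irq_stub *irq)
{
	free(irq);		/* single, caller-owned cleanup path */
}

int main(void)
{
	struct irq_stub *irq = calloc(1, sizeof(*irq));

	if (!irq)
		return 1;
	if (setup_irq_stub(irq, /*fail=*/1) != 0)
		printf("setup failed, caller still owns irq\n");
	teardown_stub(irq);	/* exactly one free, no double free */
	return 0;
}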
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 34367d1..4534a7c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -287,6 +287,27 @@
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+			    struct zfcp_port *port, struct scsi_device *sdev,
+			    u8 want, u8 need)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
 
 /**
  * zfcp_dbf_rec_run_lvl - trace event related to running recovery
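
zfcp_dbf_rec_trig_lock() above is the usual locked/unlocked pairing: the existing zfcp_dbf_rec_trig() continues to assume the caller holds adapter->erp_lock, and the new wrapper takes the lock for call sites that do not (the rport add/delete paths changed further down). A userspace sketch of the convention, with a pthread rwlock standing in for read_lock_irqsave() and the trace call stubbed (build with cc -pthread):

/* "_lock" wrapper convention: the bare helper requires the lock to be
 * held; the wrapper is for callers that do not already hold it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t erp_lock = PTHREAD_RWLOCK_INITIALIZER;

static void rec_trig(const char *tag)
{
	/* caller must hold erp_lock (read or write) */
	printf("trace: %s\n", tag);
}

static void rec_trig_lock(const char *tag)
{
	/* caller must NOT hold erp_lock */
	pthread_rwlock_rdlock(&erp_lock);
	rec_trig(tag);
	pthread_rwlock_unlock(&erp_lock);
}

int main(void)
{
	rec_trig_lock("scpaddy");	/* unlocked call site */

	pthread_rwlock_rdlock(&erp_lock);
	rec_trig("erp_path");		/* already-locked call site */
	pthread_rwlock_unlock(&erp_lock);
	return 0;
}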
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 21c8c68..7a7984a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #ifndef ZFCP_EXT_H
@@ -34,6 +34,9 @@
 extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
 			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct scsi_device *sdev, u8 want, u8 need);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
 extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
 				 struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a9b8104..bb99db2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -616,9 +616,9 @@
 	ids.port_id = port->d_id;
 	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
 
-	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+	zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
 	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
 	if (!rport) {
 		dev_err(&port->adapter->ccw_device->dev,
@@ -640,9 +640,9 @@
 	struct fc_rport *rport = port->rport;
 
 	if (rport) {
-		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+		zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
 		fc_remote_port_delete(rport);
 		port->rport = NULL;
 	}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e2962f1..fe670b6 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1374,9 +1374,10 @@
 	host = aac->scsi_host_ptr;
 	scsi_block_requests(host);
 	aac_adapter_disable_int(aac);
-	if (aac->thread->pid != current->pid) {
+	if (aac->thread && aac->thread->pid != current->pid) {
 		spin_unlock_irq(host->host_lock);
 		kthread_stop(aac->thread);
+		aac->thread = NULL;
 		jafo = 1;
 	}
 
@@ -1445,6 +1446,7 @@
 					  aac->name);
 		if (IS_ERR(aac->thread)) {
 			retval = PTR_ERR(aac->thread);
+			aac->thread = NULL;
 			goto out;
 		}
 	}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d5b26fa..ad902a6 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1083,6 +1083,7 @@
 				up(&fib->event_wait);
 		}
 		kthread_stop(aac->thread);
+		aac->thread = NULL;
 	}
 	aac_adapter_disable_int(aac);
 	cpu = cpumask_first(cpu_online_mask);
@@ -1203,8 +1204,10 @@
 	 *	Map in the registers from the adapter.
 	 */
 	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
-	if ((*aac_drivers[index].init)(aac))
+	if ((*aac_drivers[index].init)(aac)) {
+		error = -ENODEV;
 		goto out_unmap;
+	}
 
 	if (aac->sync_mode) {
 		if (aac_sync_mode)
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 2438879..936e8c7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@
 		 * have valid data in the sense buffer that could
 		 * confuse the higher levels.
 		 */
-		memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
 //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
 	/*
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index f501095..bd39590 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1869,6 +1869,7 @@
 		/* we will not receive ABTS response for this IO */
 		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
 			   "this scsi cmd\n");
+		return;
 	}
 
 	/* Cancel the timeout_work, as we received IO completion */
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4..9a8c2f9 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -222,6 +222,7 @@
 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
 {
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+	struct domain_device *dev = cmd_to_domain_dev(cmd);
 	struct sas_task *task = TO_SAS_TASK(cmd);
 
 	/* At this point, we only get called following an actual abort
@@ -230,6 +231,14 @@
 	 */
 	sas_end_task(cmd, task);
 
+	if (dev_is_sata(dev)) {
+		/* defer commands to libata so that libata EH can
+		 * handle ata qcs correctly
+		 */
+		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+		return;
+	}
+
 	/* now finish the command and move it on to the error
 	 * handler done list, this also takes it off the
 	 * error handler pending list.
@@ -237,22 +246,6 @@
 	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
 }
 
-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
-{
-	struct domain_device *dev = cmd_to_domain_dev(cmd);
-	struct sas_ha_struct *ha = dev->port->ha;
-	struct sas_task *task = TO_SAS_TASK(cmd);
-
-	if (!dev_is_sata(dev)) {
-		sas_eh_finish_cmd(cmd);
-		return;
-	}
-
-	/* report the timeout to libata */
-	sas_end_task(cmd, task);
-	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
-}
-
 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 {
 	struct scsi_cmnd *cmd, *n;
@@ -260,7 +253,7 @@
 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
 		    cmd->device->lun == my_cmd->device->lun)
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 	}
 }
 
@@ -622,12 +615,12 @@
 		case TASK_IS_DONE:
 			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
 				    task);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_ABORTED:
 			SAS_DPRINTK("%s: task 0x%p is aborted\n",
 				    __func__, task);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_AT_LU:
 			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -638,7 +631,7 @@
 					    "recovered\n",
 					    SAS_ADDR(task->dev),
 					    cmd->device->lun);
-				sas_eh_defer_cmd(cmd);
+				sas_eh_finish_cmd(cmd);
 				sas_scsi_clear_queue_lu(work_q, cmd);
 				goto Again;
 			}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4532990..cf15b97 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -635,7 +635,12 @@
 	LPFC_MBOXQ_t *pmboxq;
 	int mbxstatus = MBXERR_ERROR;
 
+	/*
+	 * If the link is offline, disabled or BLOCK_MGMT_IO is set,
+	 * it doesn't make any sense to allow issue_lip.
+	 */
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (phba->hba_flag & LINK_DISABLED) ||
 	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
 		return -EPERM;
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7d2ad63..8173645 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -690,8 +690,9 @@
 	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
-			/* Set the lpfc data pending flag */
-			set_bit(LPFC_DATA_READY, &phba->data_flags);
+			/* Preserve legacy behavior. */
+			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			if (phba->link_state >= LPFC_LINK_UP) {
 				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0902ed2..6df06e7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -116,6 +116,8 @@
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % q->entry_repost))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
+	else
+		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 44da9d8..caa0045 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -8853,7 +8853,7 @@
 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
 	ioc->firmware_event_thread = alloc_ordered_workqueue(
-	    ioc->firmware_event_name, WQ_MEM_RECLAIM);
+	    ioc->firmware_event_name, 0);
 	if (!ioc->firmware_event_thread) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 7de5d8d..eb5471b 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -1080,16 +1080,16 @@
 			void __iomem *regs = mvi->regs_ex - 0x10200;
 
 			int drive = (i/3) & (4-1); /* drive number on host */
-			u32 block = mr32(MVS_SGPIO_DCTRL +
+			int driveshift = drive * 8; /* bit offset of drive */
+			u32 block = ioread32be(regs + MVS_SGPIO_DCTRL +
 				MVS_SGPIO_HOST_OFFSET * mvi->id);
 
-
 			/*
 			* if bit is set then create a mask with the first
 			* bit of the drive set in the mask ...
 			*/
-			u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ?
-				1<<(24-drive*8) : 0;
+			u32 bit = get_unaligned_be32(write_data) & (1 << i) ?
+				1 << driveshift : 0;
 
 			/*
 			* ... and then shift it to the right position based
@@ -1098,26 +1098,27 @@
 			switch (i%3) {
 			case 0: /* activity */
 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT)
-					<< (24-drive*8));
+					<< driveshift);
 					/* hardwire activity bit to SOF */
 				block |= LED_BLINKA_SOF << (
 					MVS_SGPIO_DCTRL_ACT_SHIFT +
-					(24-drive*8));
+					driveshift);
 				break;
 			case 1: /* id */
 				block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT)
-					<< (24-drive*8));
+					<< driveshift);
 				block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT;
 				break;
 			case 2: /* fail */
 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT)
-					<< (24-drive*8));
+					<< driveshift);
 				block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT;
 				break;
 			}
 
-			mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
-				block);
+			iowrite32be(block,
+				regs + MVS_SGPIO_DCTRL +
+				MVS_SGPIO_HOST_OFFSET * mvi->id);
 
 		}
 
@@ -1132,7 +1133,7 @@
 			void __iomem *regs = mvi->regs_ex - 0x10200;
 
 			mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
-				be32_to_cpu(((u32 *) write_data)[i]));
+				((u32 *) write_data)[i]);
 		}
 		return reg_count;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index bddaabb..73c99f2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -272,7 +272,8 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
 	/* Read all mbox registers? */
-	mboxes = (1 << ha->mbx_count) - 1;
+	WARN_ON_ONCE(ha->mbx_count > 32);
+	mboxes = (1ULL << ha->mbx_count) - 1;
 	if (!ha->mcp)
 		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
 	else
@@ -2516,7 +2517,8 @@
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	/* Read all mbox registers? */
-	mboxes = (1 << ha->mbx_count) - 1;
+	WARN_ON_ONCE(ha->mbx_count > 32);
+	mboxes = (1ULL << ha->mbx_count) - 1;
 	if (!ha->mcp)
 		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
 	else
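
The qla2xxx hunks above widen the mailbox-mask computation: with mbx_count equal to 32, (1 << 32) - 1 shifts a 32-bit int by its full width, which is undefined, whereas the 64-bit shift yields the intended all-ones mask; the WARN_ON_ONCE documents the assumed upper bound. A short illustration:

/* Building an all-ones mask for a 32-bit count needs a 64-bit shift;
 * the 32-bit form would shift by the full width of int (undefined).
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 32;
	unsigned long long mask = (1ULL << count) - 1;	/* well-defined */

	/* (1 << count) - 1 would be undefined here: shift count >= 32 */
	printf("mask for %u mailboxes: %#llx\n", count, mask);
	return 0;
}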
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc27..ce1d063f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -168,6 +168,8 @@
 #define DEV_DB_NON_PERSISTENT	0
 #define DEV_DB_PERSISTENT	1
 
+#define QL4_ISP_REG_DISCONNECT 0xffffffffU
+
 #define COPY_ISID(dst_isid, src_isid) {			\
 	int i, j;					\
 	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)	\
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610..d8c0343 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -262,6 +262,24 @@
 
 static struct scsi_transport_template *qla4xxx_scsi_transport;
 
+static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+{
+	u32 reg_val = 0;
+	int rval = QLA_SUCCESS;
+
+	if (is_qla8022(ha))
+		reg_val = readl(&ha->qla4_82xx_reg->host_status);
+	else if (is_qla8032(ha) || is_qla8042(ha))
+		reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+	else
+		reg_val = readw(&ha->reg->ctrl_status);
+
+	if (reg_val == QL4_ISP_REG_DISCONNECT)
+		rval = QLA_ERROR;
+
+	return rval;
+}
+
 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
 			     uint32_t iface_type, uint32_t payload_size,
 			     uint32_t pid, struct sockaddr *dst_addr)
@@ -9196,10 +9214,17 @@
 	struct srb *srb = NULL;
 	int ret = SUCCESS;
 	int wait = 0;
+	int rval;
 
 	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
 		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	srb = (struct srb *) CMD_SP(cmd);
 	if (!srb) {
@@ -9251,6 +9276,7 @@
 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
 	int ret = FAILED, stat;
+	int rval;
 
 	if (!ddb_entry)
 		return ret;
@@ -9270,6 +9296,12 @@
 		      cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	/* FIXME: wait for hba to go online */
 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
 	if (stat != QLA_SUCCESS) {
@@ -9313,6 +9345,7 @@
 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
 	int stat, ret;
+	int rval;
 
 	if (!ddb_entry)
 		return FAILED;
@@ -9330,6 +9363,12 @@
 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	stat = qla4xxx_reset_target(ha, ddb_entry);
 	if (stat != QLA_SUCCESS) {
 		starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9384,9 +9423,16 @@
 {
 	int return_status = FAILED;
 	struct scsi_qla_host *ha;
+	int rval;
 
 	ha = to_qla_host(cmd->device->host);
 
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
 	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
 		qla4_83xx_set_idc_dontreset(ha);
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 57a3ee0..3ccc858 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2044,6 +2044,8 @@
 	if (!shost->use_clustering)
 		q->limits.cluster = 0;
 
+	if (shost->inlinecrypt_support)
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
 	/*
 	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
 	 * which is a common minimum for HBAs, and the minimum DMA alignment,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4fb494a..1cb0403 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1860,6 +1860,8 @@
 				break;	/* standby */
 			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
 				break;	/* unavailable */
+			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+				break;	/* sanitize in progress */
 			/*
 			 * Issue command to spin up drive when not ready
 			 */
@@ -2319,6 +2321,7 @@
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
+	int disk_ro = get_disk_ro(sdkp->disk);
 
 	set_disk_ro(sdkp->disk, 0);
 	if (sdp->skip_ms_page_3f) {
@@ -2358,7 +2361,7 @@
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
-		set_disk_ro(sdkp->disk, sdkp->write_prot);
+		set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
 	}
 }
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3d9ad4c..15d5af0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1897,7 +1897,7 @@
 		num = (rem_sz > scatter_elem_sz_prev) ?
 			scatter_elem_sz_prev : rem_sz;
 
-		schp->pages[k] = alloc_pages(gfp_mask, order);
+		schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
 		if (!schp->pages[k])
 			goto out;
 
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e635973..0169984 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -522,6 +522,8 @@
 	struct scsi_cd *cd;
 	int ret = -ENXIO;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&sr_mutex);
 	cd = scsi_cd_get(bdev->bd_disk);
 	if (cd) {
@@ -582,18 +584,28 @@
 static unsigned int sr_block_check_events(struct gendisk *disk,
 					  unsigned int clearing)
 {
-	struct scsi_cd *cd = scsi_cd(disk);
+	unsigned int ret = 0;
+	struct scsi_cd *cd;
 
-	if (atomic_read(&cd->device->disk_events_disable_depth))
+	cd = scsi_cd_get(disk);
+	if (!cd)
 		return 0;
 
-	return cdrom_check_events(&cd->cdi, clearing);
+	if (!atomic_read(&cd->device->disk_events_disable_depth))
+		ret = cdrom_check_events(&cd->cdi, clearing);
+
+	scsi_cd_put(cd);
+	return ret;
 }
 
 static int sr_block_revalidate_disk(struct gendisk *disk)
 {
-	struct scsi_cd *cd = scsi_cd(disk);
 	struct scsi_sense_hdr sshdr;
+	struct scsi_cd *cd;
+
+	cd = scsi_cd_get(disk);
+	if (!cd)
+		return -ENXIO;
 
 	/* if the unit is not ready, nothing more to do */
 	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
@@ -602,6 +614,7 @@
 	sr_cd_check(&cd->cdi);
 	get_sectorsize(cd);
 out:
+	scsi_cd_put(cd);
 	return 0;
 }
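
The sr.c changes above switch the check_events and revalidate paths from a bare scsi_cd(disk) dereference to scsi_cd_get()/scsi_cd_put(), so a device that disappears concurrently is reported as "no events" (or -ENXIO) instead of being touched after free. A standalone sketch of that get/use/put pattern (cd_get/cd_put and struct cd_stub are illustrative, not the sr driver's helpers):

/* Look the object up and take a reference before touching it, bail out
 * with a safe default if it is already gone, drop the reference after.
 */
#include <stdio.h>
#include <stdlib.h>

struct cd_stub { int refs; int media_events; };

static struct cd_stub *registered;	/* NULL once the device is removed */

static struct cd_stub *cd_get(void)
{
	if (!registered)
		return NULL;
	registered->refs++;
	return registered;
}

static void cd_put(struct cd_stub *cd)
{
	if (--cd->refs == 0)
		free(cd);	/* last reference frees the object */
}

static unsigned int check_events(void)
{
	struct cd_stub *cd = cd_get();
	unsigned int ret;

	if (!cd)
		return 0;	/* device vanished: report "no events" */
	ret = cd->media_events;
	cd_put(cd);
	return ret;
}

int main(void)
{
	struct cd_stub *cd = calloc(1, sizeof(*cd));

	if (!cd)
		return 1;
	cd->refs = 1;		/* the "registration" holds one reference */
	cd->media_events = 1;
	registered = cd;

	printf("events while present: %u\n", check_events());

	registered = NULL;	/* device removal... */
	cd_put(cd);		/* ...drops the registration reference */
	printf("events after removal: %u\n", check_events());
	return 0;
}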
 
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 0dd1984..d92b280 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1580,7 +1580,7 @@
 	.eh_timed_out =		storvsc_eh_timed_out,
 	.slave_alloc =		storvsc_device_alloc,
 	.slave_configure =	storvsc_device_configure,
-	.cmd_per_lun =		255,
+	.cmd_per_lun =		2048,
 	.this_id =		-1,
 	.use_clustering =	ENABLE_CLUSTERING,
 	/* Make sure we dont get a sg segment crosses a page boundary */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 6b349e3..c6425e3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -536,7 +536,7 @@
 	 *  Look for the greatest clock divisor that allows an 
 	 *  input speed faster than the period.
 	 */
-	while (div-- > 0)
+	while (--div > 0)
 		if (kpc >= (div_10M[div] << 2)) break;
 
 	/*
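
The sym_hipd.c one-character change above is an iterator-underflow fix: if the break is never taken, the post-decrement loop exits with div == -1, and the later div_10M[div] lookup would read before the array, while the pre-decrement form stops at 0. A short demonstration of the two exit values:

/* Post- vs pre-decrement loop exit: only the former can leave the
 * counter at -1 when the break condition never fires.
 */
#include <stdio.h>

int main(void)
{
	int div;

	div = 4;
	while (div-- > 0)
		;			/* break never taken */
	printf("post-decrement exits with div = %d\n", div);	/* -1 */

	div = 4;
	while (--div > 0)
		;			/* break never taken */
	printf("pre-decrement exits with div = %d\n", div);	/* 0 */
	return 0;
}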
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index b1c86d4..d4fe6ee 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -172,17 +172,15 @@
 static void ufs_qcom_ice_cfg_work(struct work_struct *work)
 {
 	unsigned long flags;
-	struct ice_data_setting ice_set;
 	struct ufs_qcom_host *qcom_host =
 		container_of(work, struct ufs_qcom_host, ice_cfg_work);
-	struct request *req_pending = NULL;
 
 	if (!qcom_host->ice.vops->config_start)
 		return;
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
-	req_pending = qcom_host->req_pending;
-	if (!req_pending) {
+	if (!qcom_host->req_pending) {
+		qcom_host->work_pending = false;
 		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 		return;
 	}
@@ -191,24 +189,15 @@
 	/*
 	 * config_start is called again as previous attempt returned -EAGAIN,
 	 * this call shall now take care of the necessary key setup.
-	 * 'ice_set' will not actually be used, instead the next call to
-	 * config_start() for this request, in the normal call flow, will
-	 * succeed as the key has now been setup.
 	 */
 	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
-		qcom_host->req_pending, &ice_set, false);
+		qcom_host->req_pending, NULL, false);
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
 	qcom_host->req_pending = NULL;
+	qcom_host->work_pending = false;
 	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 
-	/*
-	 * Resume with requests processing. We assume config_start has been
-	 * successful, but even if it wasn't we still must resume in order to
-	 * allow for the request to be retried.
-	 */
-	ufshcd_scsi_unblock_requests(qcom_host->hba);
-
 }
 
 /**
@@ -294,18 +283,14 @@
 			 * requires a non-atomic context, this means we should
 			 * call the function again from the worker thread to do
 			 * the configuration. For this request the error will
-			 * propagate so it will be re-queued and until the
-			 * configuration is is completed we block further
-			 * request processing.
+			 * propagate so it will be re-queued.
 			 */
 			if (err == -EAGAIN) {
 				dev_dbg(qcom_host->hba->dev,
 					"%s: scheduling task for ice setup\n",
 					__func__);
 
-				if (!qcom_host->req_pending) {
-					ufshcd_scsi_block_requests(
-						qcom_host->hba);
+				if (!qcom_host->work_pending) {
 					qcom_host->req_pending = cmd->request;
 
 					if (!queue_work(ice_workqueue,
@@ -316,10 +301,9 @@
 						&qcom_host->ice_work_lock,
 						flags);
 
-						ufshcd_scsi_unblock_requests(
-							qcom_host->hba);
 						return err;
 					}
+					qcom_host->work_pending = true;
 				}
 
 			} else {
@@ -418,9 +402,7 @@
 			 * requires a non-atomic context, this means we should
 			 * call the function again from the worker thread to do
 			 * the configuration. For this request the error will
-			 * propagate so it will be re-queued and until the
-			 * configuration is is completed we block further
-			 * request processing.
+			 * propagate so it will be re-queued.
 			 */
 			if (err == -EAGAIN) {
 
@@ -428,9 +410,8 @@
 					"%s: scheduling task for ice setup\n",
 					__func__);
 
-				if (!qcom_host->req_pending) {
-					ufshcd_scsi_block_requests(
-						qcom_host->hba);
+				if (!qcom_host->work_pending) {
+
 					qcom_host->req_pending = cmd->request;
 					if (!queue_work(ice_workqueue,
 						&qcom_host->ice_cfg_work)) {
@@ -440,10 +421,9 @@
 						&qcom_host->ice_work_lock,
 						flags);
 
-						ufshcd_scsi_unblock_requests(
-							qcom_host->hba);
 						return err;
 					}
+					qcom_host->work_pending = true;
 				}
 
 			} else {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 39ab28a..93c3af0 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -897,11 +897,18 @@
 		req = lrbp->cmd->request;
 	else
 		return 0;
-
-	/* Use request LBA as the DUN value */
-	if (req->bio)
-		*dun = (req->bio->bi_iter.bi_sector) >>
-				UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+	/*
+	 * Right now ICE does not support a variable DUN, but this can
+	 * be taken up as a future enhancement:
+	 * if (bio_dun(req->bio)) {
+	 *      dun @bio can be split, so we have to adjust offset
+	 *      *dun = bio_dun(req->bio);
+	 * } else
+	 */
+	if (req->bio) {
+		*dun = req->bio->bi_iter.bi_sector;
+		*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+	}
 
 	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
 
@@ -2133,6 +2140,8 @@
 		dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
 			__func__, err);
 		goto out_host_free;
+	} else {
+		hba->host->inlinecrypt_support = 1;
 	}
 
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -2740,7 +2749,7 @@
 	 * the regulators.
 	 */
 	if (of_property_read_bool(np, "non-removable") &&
-	    strlen(android_boot_dev) &&
+	    !of_property_read_bool(np, "force-ufshc-probe") &&
 	    strcmp(android_boot_dev, dev_name(dev)))
 		return -ENODEV;
 
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a03ecb0..27f6a05 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -375,6 +375,7 @@
 	struct work_struct ice_cfg_work;
 	struct request *req_pending;
 	struct ufs_vreg *vddp_ref_clk;
+	bool work_pending;
 };
 
 static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d427fb3..c6cfd18 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5433,7 +5433,7 @@
 	/* REPORT SUPPORTED OPERATION CODES is not supported */
 	sdev->no_report_opcodes = 1;
 
-	/* WRITE_SAME command is not supported*/
+	/* WRITE_SAME command is not supported */
 	sdev->no_write_same = 1;
 
 	ufshcd_set_queue_depth(sdev);
@@ -6559,8 +6559,8 @@
 	u32 mode;
 
 	hba = container_of(work, struct ufs_hba, rls_work);
-	ufshcd_scsi_block_requests(hba);
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->lock);
 	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 	if (ret) {
@@ -7090,7 +7090,10 @@
 	 * To avoid these unnecessary/illegal step we skip to the last error
 	 * handling stage: reset and restore.
 	 */
-	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+	if ((lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) ||
+	    (lrbp->lun == UFS_UPIU_REPORT_LUNS_WLUN) ||
+	    (lrbp->lun == UFS_UPIU_BOOT_WLUN) ||
+	    (lrbp->lun == UFS_UPIU_RPMB_WLUN))
 		return ufshcd_eh_host_reset_handler(cmd);
 
 	ufshcd_hold_all(hba);
@@ -8093,9 +8096,16 @@
 	/*
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
+	 * In cases where both UFS and eMMC are present and the regulators
+	 * are shared between the two, this shouldn't turn off the
+	 * regulators without giving eMMC a chance to send PON.
+	 * Hence schedule a delayed suspend, giving eMMC enough time to
+	 * vote for the shared regulator.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
-		pm_runtime_put_sync(hba->dev);
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+		pm_runtime_put_noidle(hba->dev);
+		pm_schedule_suspend(hba->dev, MSEC_PER_SEC * 10);
+	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 26dfd3f..34be230 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -214,6 +214,8 @@
 						dsp);
 	struct pd_qmi_client_data *reg;
 
+	/* Resetting the log level */
+	SLIM_RST_LOGLVL(dev);
 	SLIM_INFO(dev, "SLIM DSP SSR/PDR notify cb:0x%lx, type:%d\n",
 			code, dsp->dom_t);
 	switch (code) {
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index 24a3ccf..8f4851e 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -182,8 +182,12 @@
 	if (!ctrl_dev->iommu_desc.cb_dev)
 		return 0;
 
-	if (!IS_ERR_OR_NULL(ctrl_dev->iommu_desc.iommu_map))
-		return 0;
+	if (!IS_ERR_OR_NULL(ctrl_dev->iommu_desc.iommu_map)) {
+		arm_iommu_detach_device(ctrl_dev->iommu_desc.cb_dev);
+		arm_iommu_release_mapping(ctrl_dev->iommu_desc.iommu_map);
+		ctrl_dev->iommu_desc.iommu_map = NULL;
+		SLIM_INFO(ctrl_dev, "NGD IOMMU Detach complete\n");
+	}
 
 	dev = ctrl_dev->iommu_desc.cb_dev;
 	iommu_map = arm_iommu_create_mapping(&platform_bus_type,
@@ -1300,12 +1304,6 @@
 			msm_slim_disconn_pipe_port(dev, i);
 	}
 
-	if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
-		arm_iommu_detach_device(dev->iommu_desc.cb_dev);
-		arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
-		dev->iommu_desc.iommu_map = NULL;
-	}
-
 	if (dereg) {
 		for (i = 0; i < dev->port_nums; i++) {
 			if (dev->pipes[i].connected)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5fafaca..3ecca59 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -684,6 +684,17 @@
 	  determines last CPU to call into PSCI for cluster Low power
 	  modes.
 
+config MSM_PM_LEGACY
+	depends on PM
+	select MSM_IDLE_STATS if DEBUG_FS
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	bool "Qualcomm platform specific Legacy PM driver"
+	help
+	  Platform specific legacy power driver to manage
+	  cores and L2 low power modes. It interfaces with
+	  various system drivers and puts the cores into
+	  low power modes.
+
 config MSM_NOPM
 	default y if !PM
 	bool
@@ -700,7 +711,7 @@
 	  trusted apps, unloading them and marshalling buffers to the
 	  trusted fingerprint app.
 
-if MSM_PM
+if (MSM_PM || MSM_PM_LEGACY)
 menuconfig MSM_IDLE_STATS
 	bool "Collect idle statistics"
 	help
@@ -733,7 +744,7 @@
 	  histogram.  This is for collecting statistics on suspend.
 
 endif # MSM_IDLE_STATS
-endif # MSM_PM
+endif # MSM_PM || MSM_PM_LEGACY
 
 config QCOM_DCC_V2
 	bool "Qualcomm Technologies Data Capture and Compare engine support for V2"
@@ -868,3 +879,11 @@
 		interrupt event and event data.
 
 source "drivers/soc/qcom/wcnss/Kconfig"
+
+config BIG_CLUSTER_MIN_FREQ_ADJUST
+	bool "Adjust BIG cluster min frequency based on power collapse state"
+	default n
+	help
+	  This driver sets the floor of the big cluster's minimum frequency to the
+	  user specified value when the cluster is not power collapsed. When the
+	  cluster is power collapsed, it resets the floor to the physical limits.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index b83f554..0b71121 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_MSM_PM_LEGACY) += pm-boot.o msm-pm.o
 obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
 obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
 obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
@@ -105,3 +106,4 @@
 obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
 obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
 obj-$(CONFIG_WCNSS_CORE) += wcnss/
+obj-$(CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST) += big_cluster_min_freq_adjust.o
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index fdfd7b7..ffba372 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -33,7 +33,7 @@
 
 #define BGRSB_GLINK_INTENT_SIZE 0x04
 #define BGRSB_MSG_SIZE 0x08
-#define TIMEOUT_MS 500
+#define TIMEOUT_MS 2000
 
 #define BGRSB_LDO15_VTG_MIN_UV 3300000
 #define BGRSB_LDO15_VTG_MAX_UV 3300000
@@ -122,6 +122,8 @@
 
 	struct device *ldev;
 
+	struct wakeup_source bgrsb_ws;
+
 	wait_queue_head_t link_state_wait;
 
 	uint32_t calbrtion_intrvl;
@@ -462,6 +464,7 @@
 	if (!dev->chnl_state)
 		return -ENODEV;
 
+	__pm_stay_awake(&dev->bgrsb_ws);
 	mutex_lock(&dev->glink_mutex);
 	init_completion(&dev->tx_done);
 	init_completion(&dev->bg_resp_cmplt);
@@ -507,6 +510,7 @@
 
 err_ret:
 	mutex_unlock(&dev->glink_mutex);
+	__pm_relax(&dev->bgrsb_ws);
 	return rc;
 }
 
@@ -544,7 +548,7 @@
 
 		rc = wait_event_timeout(dev->link_state_wait,
 				(dev->chnl_state == true),
-					msecs_to_jiffies(TIMEOUT_MS*2));
+					msecs_to_jiffies(TIMEOUT_MS));
 		if (rc == 0) {
 			pr_err("Glink channel connection time out\n");
 			return;
@@ -574,7 +578,7 @@
 
 		rc = wait_event_timeout(dev->link_state_wait,
 					(dev->chnl_state == true),
-						msecs_to_jiffies(TIMEOUT_MS*2));
+						msecs_to_jiffies(TIMEOUT_MS));
 		if (rc == 0) {
 			pr_err("Glink channel connection time out\n");
 			return;
@@ -904,6 +908,9 @@
 	if (!dev)
 		return -ENOMEM;
 
+	/* Add wake lock for PM suspend */
+	wakeup_source_init(&dev->bgrsb_ws, "BGRSB_wake_lock");
+
 	dev->bgrsb_current_state = BGRSB_STATE_UNKNOWN;
 	rc = bgrsb_init(dev);
 	if (rc)
@@ -964,13 +971,14 @@
 	destroy_workqueue(dev->bgrsb_event_wq);
 	destroy_workqueue(dev->bgrsb_wq);
 	input_free_device(dev->input);
+	wakeup_source_trash(&dev->bgrsb_ws);
 
 	return 0;
 }
 
-static int bg_rsb_resume(struct platform_device *pdev)
+static int bg_rsb_resume(struct device *pldev)
 {
-	int rc;
+	struct platform_device *pdev = to_platform_device(pldev);
 	struct bgrsb_priv *dev = platform_get_drvdata(pdev);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_RSB_CONFIGURED)
@@ -978,12 +986,6 @@
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
 		if (bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
-			rc = bgrsb_configr_rsb(dev, true);
-			if (rc != 0) {
-				pr_err("BG failed to configure RSB %d\n", rc);
-				bgrsb_ldo_work(dev, BGRSB_DISABLE_LDO11);
-				return rc;
-			}
 			dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 			pr_debug("RSB Cofigured\n");
 			return 0;
@@ -993,8 +995,9 @@
 	return -EINVAL;
 }
 
-static int bg_rsb_suspend(struct platform_device *pdev, pm_message_t state)
+static int bg_rsb_suspend(struct device *pldev)
 {
+	struct platform_device *pdev = to_platform_device(pldev);
 	struct bgrsb_priv *dev = platform_get_drvdata(pdev);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT)
@@ -1021,15 +1024,19 @@
 	{ }
 };
 
+static const struct dev_pm_ops pm_rsb = {
+	.resume		= bg_rsb_resume,
+	.suspend	= bg_rsb_suspend,
+};
+
 static struct platform_driver bg_rsb_driver = {
 	.driver = {
 		.name = "bg-rsb",
 		.of_match_table = bg_rsb_of_match,
+		.pm = &pm_rsb,
 	},
 	.probe		= bg_rsb_probe,
 	.remove		= bg_rsb_remove,
-	.resume		= bg_rsb_resume,
-	.suspend	= bg_rsb_suspend,
 };
 
 module_platform_driver(bg_rsb_driver);
diff --git a/drivers/soc/qcom/bgcom_interface.c b/drivers/soc/qcom/bgcom_interface.c
index efef26d..1cde8c6 100644
--- a/drivers/soc/qcom/bgcom_interface.c
+++ b/drivers/soc/qcom/bgcom_interface.c
@@ -43,7 +43,8 @@
 #define BGDAEMON_LDO03_LPM_VTG 0
 #define BGDAEMON_LDO03_NPM_VTG 10000
 
-#define MPPS_DOWN_EVENT_TO_BG_TIMEOUT 100
+#define MPPS_DOWN_EVENT_TO_BG_TIMEOUT 3000
+#define SLEEP_FOR_SPI_BUS 2000
 
 enum {
 	SSR_DOMAIN_BG,
@@ -93,6 +94,7 @@
 static  dev_t                    bg_dev;
 static  int                      device_open;
 static  void                     *handle;
+static	bool                     twm_exit;
 static  struct   bgcom_open_config_type   config_type;
 static DECLARE_COMPLETION(bg_modem_down_wait);
 
@@ -353,6 +355,8 @@
 		break;
 	case SET_SPI_BUSY:
 		ret = bgcom_set_spi_state(BGCOM_SPI_BUSY);
+		/* Add sleep for SPI bus to release */
+		msleep(SLEEP_FOR_SPI_BUS);
 		break;
 	case BG_SOFT_RESET:
 		ret = bg_soft_reset();
@@ -360,6 +364,10 @@
 	case BG_MODEM_DOWN2_BG_DONE:
 		ret = modem_down2_bg();
 		break;
+	case BG_TWM_EXIT:
+		twm_exit = true;
+		ret = 0;
+		break;
 	default:
 		ret = -ENOIOCTLCMD;
 	}
@@ -515,6 +523,10 @@
 		bgcom_set_spi_state(BGCOM_SPI_BUSY);
 		send_uevent(&bge);
 		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		/* Add sleep for SPI bus to release */
+		msleep(SLEEP_FOR_SPI_BUS);
+		break;
 	case SUBSYS_AFTER_POWERUP:
 		bge.e_type = BG_AFTER_POWER_UP;
 		bgdaemon_ldowork(DISABLE_LDO03);
@@ -555,6 +567,16 @@
 	return NOTIFY_DONE;
 }
 
+bool is_twm_exit(void)
+{
+	if (twm_exit) {
+		twm_exit = false;
+		return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(is_twm_exit);
+
 static struct notifier_block ssr_modem_nb = {
 	.notifier_call = ssr_modem_cb,
 	.priority = 0,
diff --git a/drivers/soc/qcom/bgcom_interface.h b/drivers/soc/qcom/bgcom_interface.h
index 500ca6d..235995e 100644
--- a/drivers/soc/qcom/bgcom_interface.h
+++ b/drivers/soc/qcom/bgcom_interface.h
@@ -13,10 +13,17 @@
 #ifndef BGCOM_INTERFACE_H
 #define BGCOM_INTERFACE_H
 
-/**
+/*
  * bg_soft_reset() - soft reset Blackghost
  * Return 0 on success or -Ve on error
  */
 int bg_soft_reset(void);
 
+/*
+ * is_twm_exit()
+ * Return true if device is booting up on TWM exit.
+ * value is auto cleared once read.
+ */
+bool is_twm_exit(void);
+
 #endif /* BGCOM_INTERFACE_H */
diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c
index b8e3b84..d2dc05f 100644
--- a/drivers/soc/qcom/bgcom_spi.c
+++ b/drivers/soc/qcom/bgcom_spi.c
@@ -25,6 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include <linux/kthread.h>
+#include <linux/dma-mapping.h>
 #include "bgcom.h"
 #include "bgrsb.h"
 #include "bgcom_interface.h"
@@ -43,12 +44,11 @@
 
 #define BG_SPI_MAX_WORDS (0x3FFFFFFD)
 #define BG_SPI_MAX_REGS (0x0A)
-#define SLEEP_IN_STATE_CHNG 2000
 #define HED_EVENT_ID_LEN (0x02)
 #define HED_EVENT_SIZE_LEN (0x02)
 #define HED_EVENT_DATA_STRT_LEN (0x05)
 
-#define MAX_RETRY 200
+#define MAX_RETRY 500
 
 enum bgcom_state {
 	/*BGCOM Staus ready*/
@@ -63,6 +63,7 @@
 	BGCOM_READ_REG = 0,
 	BGCOM_READ_FIFO = 1,
 	BGCOM_READ_AHB = 2,
+	BGCOM_WRITE_REG = 3,
 };
 
 struct bg_spi_priv {
@@ -112,6 +113,17 @@
 
 static struct mutex bg_resume_mutex;
 
+static atomic_t  bg_is_spi_active;
+static int bg_irq;
+
+static struct spi_device *get_spi_device(void)
+{
+	struct bg_spi_priv *bg_spi = container_of(bg_com_drv,
+						struct bg_spi_priv, lhandle);
+	struct spi_device *spi = bg_spi->spi;
+	return spi;
+}
+
 static void augmnt_fifo(uint8_t *data, int pos)
 {
 	data[pos] = '\0';
@@ -149,8 +161,6 @@
 
 	mutex_lock(&bg_spi->xfer_mutex);
 	spi_state = state;
-	if (spi_state == BGCOM_SPI_BUSY)
-		msleep(SLEEP_IN_STATE_CHNG);
 	mutex_unlock(&bg_spi->xfer_mutex);
 	return 0;
 }
@@ -197,6 +207,10 @@
 	case BGCOM_READ_FIFO:
 		ret = bgcom_fifo_read(&clnt_handle, no_of_words, buf);
 		break;
+	case BGCOM_WRITE_REG:
+		ret = bgcom_reg_write(&clnt_handle, BG_CMND_REG,
+					no_of_words, buf);
+		break;
 	case BGCOM_READ_AHB:
 		break;
 	}
@@ -232,6 +246,9 @@
 	tx_xfer = &bg_spi->xfer1;
 	spi = bg_spi->spi;
 
+	if (!atomic_read(&bg_is_spi_active))
+		return -ECANCELED;
+
 	mutex_lock(&bg_spi->xfer_mutex);
 	bg_spi_reinit_xfer(tx_xfer);
 	tx_xfer->tx_buf = tx_buf;
@@ -443,6 +460,7 @@
 int bgcom_ahb_read(void *handle, uint32_t ahb_start_addr,
 	uint32_t num_words, void *read_buf)
 {
+	dma_addr_t dma_hndl_tx, dma_hndl_rx;
 	uint32_t txn_len;
 	uint8_t *tx_buf;
 	uint8_t *rx_buf;
@@ -450,6 +468,7 @@
 	int ret;
 	uint8_t cmnd = 0;
 	uint32_t ahb_addr = 0;
+	struct spi_device *spi = get_spi_device();
 
 	if (!handle || !read_buf || num_words == 0
 		|| num_words > BG_SPI_MAX_WORDS) {
@@ -472,15 +491,16 @@
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_AHB_READ_CMD_LEN + size;
 
-	tx_buf = kzalloc(txn_len, GFP_KERNEL);
 
+	tx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
+					&dma_hndl_tx, GFP_KERNEL);
 	if (!tx_buf)
 		return -ENOMEM;
 
-	rx_buf = kzalloc(txn_len, GFP_KERNEL);
-
+	rx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
+					&dma_hndl_rx, GFP_KERNEL);
 	if (!rx_buf) {
-		kfree(tx_buf);
+		dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
 		return -ENOMEM;
 	}
 
@@ -495,8 +515,8 @@
 	if (!ret)
 		memcpy(read_buf, rx_buf+BG_SPI_AHB_READ_CMD_LEN, size);
 
-	kfree(tx_buf);
-	kfree(rx_buf);
+	dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
+	dma_free_coherent(&spi->dev, txn_len, rx_buf, dma_hndl_rx);
 	return ret;
 }
 EXPORT_SYMBOL(bgcom_ahb_read);
@@ -504,12 +524,14 @@
 int bgcom_ahb_write(void *handle, uint32_t ahb_start_addr,
 	uint32_t num_words, void *write_buf)
 {
+	dma_addr_t dma_hndl;
 	uint32_t txn_len;
 	uint8_t *tx_buf;
 	uint32_t size;
 	int ret;
 	uint8_t cmnd = 0;
 	uint32_t ahb_addr = 0;
+	struct spi_device *spi = get_spi_device();
 
 	if (!handle || !write_buf || num_words == 0
 		|| num_words > BG_SPI_MAX_WORDS) {
@@ -532,9 +554,7 @@
 
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_AHB_CMD_LEN + size;
-
-	tx_buf = kzalloc(txn_len, GFP_KERNEL);
-
+	tx_buf = dma_zalloc_coherent(&spi->dev, txn_len, &dma_hndl, GFP_KERNEL);
 	if (!tx_buf)
 		return -ENOMEM;
 
@@ -546,7 +566,7 @@
 	memcpy(tx_buf+BG_SPI_AHB_CMD_LEN, write_buf, size);
 
 	ret = bgcom_transfer(handle, tx_buf, NULL, txn_len);
-	kfree(tx_buf);
+	dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl);
 	return ret;
 }
 EXPORT_SYMBOL(bgcom_ahb_write);
@@ -621,6 +641,11 @@
 		return -EBUSY;
 	}
 
+	if (bgcom_resume(handle)) {
+		pr_err("Failed to resume\n");
+		return -EBUSY;
+	}
+
 	size = num_words*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_READ_LEN + size;
 	tx_buf = kzalloc(txn_len, GFP_KERNEL | GFP_ATOMIC);
@@ -671,6 +696,11 @@
 		return -EBUSY;
 	}
 
+	if (bgcom_resume(handle)) {
+		pr_err("Failed to resume\n");
+		return -EBUSY;
+	}
+
 	size = num_regs*BG_SPI_WORD_SIZE;
 	txn_len = BG_SPI_WRITE_CMND_LEN + size;
 
@@ -749,11 +779,18 @@
 	uint8_t rx_buf[8] = {0};
 	uint32_t cmnd_reg = 0;
 
+	if (spi_state == BGCOM_SPI_BUSY) {
+		printk_ratelimited("SPI is held by TZ\n");
+		goto ret_err;
+	}
+
 	txn_len = 0x08;
 	tx_buf[0] = 0x05;
 	ret = bgcom_transfer(handle, tx_buf, rx_buf, txn_len);
 	if (!ret)
 		memcpy(&cmnd_reg, rx_buf+BG_SPI_READ_LEN, 0x04);
+
+ret_err:
 	return cmnd_reg & BIT(31);
 }
 
@@ -766,6 +803,9 @@
 	if (handle == NULL)
 		return -EINVAL;
 
+	if (!atomic_read(&bg_is_spi_active))
+		return -ECANCELED;
+
 	cntx = (struct bg_context *)handle;
 	bg_spi = cntx->bg_spi;
 
@@ -789,36 +829,15 @@
 		bg_soft_reset();
 		return -ETIMEDOUT;
 	}
-	pr_info("BG retries for wake up : %d\n", retry);
 	return 0;
 }
 EXPORT_SYMBOL(bgcom_resume);
 
 int bgcom_suspend(void *handle)
 {
-	struct bg_spi_priv *bg_spi;
-	struct bg_context *cntx;
-	uint32_t cmnd_reg = 0;
-	int ret = 0;
-
-	if (handle == NULL)
+	if (!handle)
 		return -EINVAL;
-
-	cntx = (struct bg_context *)handle;
-	bg_spi = cntx->bg_spi;
-	mutex_lock(&bg_resume_mutex);
-	if (bg_spi->bg_state == BGCOM_STATE_SUSPEND)
-		goto unlock;
-
-	cmnd_reg |= BIT(31);
-	ret = bgcom_reg_write(handle, BG_CMND_REG, 1, &cmnd_reg);
-	if (ret == 0)
-		bg_spi->bg_state = BGCOM_STATE_SUSPEND;
-
-unlock:
-	mutex_unlock(&bg_resume_mutex);
-	pr_info("suspended with : %d\n", ret);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(bgcom_suspend);
 
@@ -931,7 +950,6 @@
 	struct bg_spi_priv *bg_spi;
 	struct device_node *node;
 	int irq_gpio = 0;
-	int bg_irq = 0;
 	int ret;
 
 	bg_spi = devm_kzalloc(&spi->dev, sizeof(*bg_spi),
@@ -969,6 +987,8 @@
 	if (ret)
 		goto err_ret;
 
+	atomic_set(&bg_is_spi_active, 1);
+	dma_set_coherent_mask(&spi->dev, DMA_BIT_MASK(64));
 	pr_info("Bgcom Probed successfully\n");
 	return ret;
 
@@ -990,6 +1010,46 @@
 	return 0;
 }
 
+static int bgcom_pm_suspend(struct device *dev)
+{
+	uint32_t cmnd_reg = 0;
+	struct spi_device *s_dev = to_spi_device(dev);
+	struct bg_spi_priv *bg_spi = spi_get_drvdata(s_dev);
+	int ret = 0;
+
+	if (bg_spi->bg_state == BGCOM_STATE_SUSPEND)
+		return 0;
+
+	cmnd_reg |= BIT(31);
+	ret = read_bg_locl(BGCOM_WRITE_REG, 1, &cmnd_reg);
+	if (ret == 0) {
+		bg_spi->bg_state = BGCOM_STATE_SUSPEND;
+		atomic_set(&bg_is_spi_active, 0);
+	}
+	pr_info("suspended with : %d\n", ret);
+	return ret;
+}
+
+static int bgcom_pm_resume(struct device *dev)
+{
+	struct bg_context clnt_handle;
+	int ret;
+	struct bg_spi_priv *spi =
+		container_of(bg_com_drv, struct bg_spi_priv, lhandle);
+
+	clnt_handle.bg_spi = spi;
+	atomic_set(&bg_is_spi_active, 1);
+	ret = bgcom_resume(&clnt_handle);
+	if (ret == 0)
+		pr_info("Bgcom resumed\n");
+	return ret;
+}
+
+static const struct dev_pm_ops bgcom_pm = {
+	.suspend = bgcom_pm_suspend,
+	.resume = bgcom_pm_resume,
+};
+
 static const struct of_device_id bg_spi_of_match[] = {
 	{ .compatible = "qcom,bg-spi", },
 	{ }
@@ -1000,6 +1060,7 @@
 	.driver = {
 		.name = "bg-spi",
 		.of_match_table = bg_spi_of_match,
+		.pm = &bgcom_pm,
 	},
 	.probe = bg_spi_probe,
 	.remove = bg_spi_remove,
diff --git a/drivers/soc/qcom/big_cluster_min_freq_adjust.c b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
new file mode 100644
index 0000000..979dd81
--- /dev/null
+++ b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
@@ -0,0 +1,324 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)     "big_min_freq_adjust: " fmt
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/moduleparam.h>
+
+enum min_freq_adjust {
+	ADJUST_MIN_FLOOR,	/* Set min floor to user supplied value */
+	RESET_MIN_FLOOR,	/* Reset min floor cpuinfo value */
+};
+
+struct big_min_freq_adjust_data {
+	struct cpumask cluster_cpumask;
+	unsigned int min_freq_floor;
+	struct delayed_work min_freq_work;
+	unsigned long min_down_delay_jiffies;
+	enum min_freq_adjust min_freq_state;
+	enum min_freq_adjust min_freq_request;
+	spinlock_t lock;
+	bool big_min_freq_on;
+	bool is_init;
+};
+static struct big_min_freq_adjust_data big_min_freq_adjust_data;
+
+static void cpufreq_min_freq_work(struct work_struct *work)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+
+	spin_lock(&p->lock);
+	if (p->min_freq_state == p->min_freq_request) {
+		spin_unlock(&p->lock);
+		return;
+	}
+	spin_unlock(&p->lock);
+	cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+}
+
+static int cpufreq_callback(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	struct cpufreq_policy *policy = data;
+	unsigned int min_freq_floor;
+
+	if (p->big_min_freq_on == false)
+		return NOTIFY_DONE;
+
+	if (val != CPUFREQ_ADJUST)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpumask_first(&p->cluster_cpumask),
+				policy->related_cpus))
+		return NOTIFY_DONE;
+
+	spin_lock(&p->lock);
+	if (p->min_freq_request == ADJUST_MIN_FLOOR)
+		min_freq_floor = p->min_freq_floor;
+	else
+		min_freq_floor = policy->cpuinfo.min_freq;
+	cpufreq_verify_within_limits(policy, min_freq_floor,
+			policy->cpuinfo.max_freq);
+	p->min_freq_state = p->min_freq_request;
+	spin_unlock(&p->lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_nb = {
+	.notifier_call = cpufreq_callback
+};
+
+#define AFFINITY_LEVEL_L2 1
+static int cpu_pm_callback(struct notifier_block *self,
+			       unsigned long cmd, void *v)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	unsigned long aff_level = (unsigned long) v;
+	unsigned long delay;
+	int cpu;
+
+	if (p->big_min_freq_on == false)
+		return NOTIFY_DONE;
+
+	if (aff_level != AFFINITY_LEVEL_L2)
+		return NOTIFY_DONE;
+
+	cpu = smp_processor_id();
+
+	if (!cpumask_test_cpu(cpu, &p->cluster_cpumask))
+		return NOTIFY_DONE;
+
+	spin_lock(&p->lock);
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		p->min_freq_request = RESET_MIN_FLOOR;
+		delay = p->min_down_delay_jiffies;
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		p->min_freq_request = ADJUST_MIN_FLOOR;
+		/* To avoid unnecessary oscillations between exit and idle */
+		delay = 1;
+		break;
+	default:
+		spin_unlock(&p->lock);
+		return NOTIFY_DONE;
+	}
+
+	cancel_delayed_work(&p->min_freq_work);
+
+	if (p->min_freq_state != p->min_freq_request) {
+		if (p->min_freq_request == ADJUST_MIN_FLOOR) {
+			if (p->min_freq_floor > cpufreq_quick_get(cpu))
+				delay = 0;
+		}
+		queue_delayed_work(system_unbound_wq, &p->min_freq_work, delay);
+	}
+	spin_unlock(&p->lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pm_nb = {
+	.notifier_call = cpu_pm_callback
+};
+
+static unsigned long __read_mostly big_min_down_delay_ms;
+#define MIN_DOWN_DELAY_MSEC 80 /* Default big_min_down_delay in msec */
+#define POLICY_MIN 1094400 /* Default min_freq_floor in KHz */
+
+static void trigger_state_machine(void *d)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	bool *update_policy = d;
+
+	if (p->min_freq_request != ADJUST_MIN_FLOOR) {
+		p->min_freq_request = ADJUST_MIN_FLOOR;
+		*update_policy = true;
+	}
+}
+
+static int enable_big_min_freq_adjust(void)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	int ret;
+	bool update_policy = false;
+
+	if (p->big_min_freq_on == true)
+		return 0;
+
+	if (!cpumask_weight(&p->cluster_cpumask)) {
+		pr_err("Cluster CPU IDs not set\n");
+		return -EPERM;
+	}
+
+	INIT_DEFERRABLE_WORK(&p->min_freq_work, cpufreq_min_freq_work);
+
+	if (!big_min_down_delay_ms) {
+		big_min_down_delay_ms = MIN_DOWN_DELAY_MSEC;
+		p->min_down_delay_jiffies = msecs_to_jiffies(
+				big_min_down_delay_ms);
+	}
+	if (!p->min_freq_floor)
+		p->min_freq_floor = POLICY_MIN;
+
+	ret = cpu_pm_register_notifier(&cpu_pm_nb);
+	if (ret) {
+		pr_err("Failed to register for PM notification\n");
+		return ret;
+	}
+
+	ret = cpufreq_register_notifier(&cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+	if (ret) {
+		pr_err("Failed to register for CPUFREQ POLICY notification\n");
+		cpu_pm_unregister_notifier(&cpu_pm_nb);
+		return ret;
+	}
+
+	p->min_freq_state = RESET_MIN_FLOOR;
+	p->min_freq_request = RESET_MIN_FLOOR;
+	spin_lock_init(&p->lock);
+	p->big_min_freq_on = true;
+
+	/* If the BIG cluster is active now and stays active indefinitely,
+	 * its min frequency would never be raised to the floor value.
+	 * Trigger the state machine here so that the min freq and
+	 * min_freq_state are set to the appropriate values.
+	 *
+	 * Two possibilities:
+	 * 1) If the cluster was idle before this, the wakeup is unnecessary
+	 * but the state machine is put into the proper state.
+	 * 2) If the cluster was active before this, the wakeup is necessary
+	 * and the state machine is put into the proper state.
+	 */
+	smp_call_function_any(&p->cluster_cpumask,
+			trigger_state_machine, &update_policy, true);
+	if (update_policy)
+		cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+
+	pr_info("big min freq ajustment enabled\n");
+
+	return 0;
+}
+
+static bool __read_mostly big_min_freq_adjust_enabled;
+
+static int set_big_min_freq_adjust(const char *buf,
+		const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_bool_enable_only(buf, kp);
+	if (ret) {
+		pr_err("Unable to set big_min_freq_adjust_enabled: %d\n", ret);
+		return ret;
+	}
+
+	if (!big_min_freq_adjust_data.is_init)
+		return ret;
+
+	return enable_big_min_freq_adjust();
+}
+
+static const struct kernel_param_ops param_ops_big_min_freq_adjust = {
+	.set = set_big_min_freq_adjust,
+	.get = param_get_bool,
+};
+module_param_cb(min_freq_adjust, &param_ops_big_min_freq_adjust,
+		&big_min_freq_adjust_enabled, 0644);
+
+module_param_named(min_freq_floor, big_min_freq_adjust_data.min_freq_floor,
+		uint, 0644);
+
+static int set_min_down_delay_ms(const char *buf, const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_ulong(buf, kp);
+	if (ret) {
+		pr_err("Unable to set big_min_down_delay_ms: %d\n", ret);
+		return ret;
+	}
+
+	big_min_freq_adjust_data.min_down_delay_jiffies = msecs_to_jiffies(
+			big_min_down_delay_ms);
+
+	return 0;
+}
+
+static const struct kernel_param_ops param_ops_big_min_down_delay_ms = {
+	.set = set_min_down_delay_ms,
+	.get = param_get_ulong,
+};
+module_param_cb(min_down_delay_ms, &param_ops_big_min_down_delay_ms,
+		&big_min_down_delay_ms, 0644);
+
+#define MAX_STR_LEN 16
+static char big_min_freq_cluster[MAX_STR_LEN];
+static struct kparam_string big_min_freq_cluster_kps = {
+	.string = big_min_freq_cluster,
+	.maxlen = MAX_STR_LEN,
+};
+
+static int set_big_min_freq_cluster(const char *buf,
+		const struct kernel_param *kp)
+{
+	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+	int ret;
+
+	if (p->big_min_freq_on == true) {
+		ret = -EPERM;
+		goto err;
+	}
+
+	ret = param_set_copystring(buf, kp);
+	if (ret)
+		goto err;
+
+	ret = cpulist_parse(big_min_freq_cluster_kps.string,
+			&p->cluster_cpumask);
+	if (ret) {
+		cpumask_clear(&p->cluster_cpumask);
+		goto err;
+	}
+
+	return 0;
+err:
+	pr_err("Unable to set big_min_freq_cluster: %d\n", ret);
+	return ret;
+}
+
+static const struct kernel_param_ops param_ops_big_min_freq_cluster = {
+	.set = set_big_min_freq_cluster,
+	.get = param_get_string,
+};
+module_param_cb(min_freq_cluster, &param_ops_big_min_freq_cluster,
+		&big_min_freq_cluster_kps, 0644);
+
+static int __init big_min_freq_adjust_init(void)
+{
+	big_min_freq_adjust_data.is_init = true;
+	if (!big_min_freq_adjust_enabled)
+		return 0;
+
+	return enable_big_min_freq_adjust();
+}
+late_initcall(big_min_freq_adjust_init);
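
Since every knob above is exposed through module_param_cb(), the feature can be tuned at run time from userspace. A rough sketch, assuming the built-in object's parameters appear under /sys/module/big_cluster_min_freq_adjust/parameters/ (the module name is inferred from the file name and is not stated in the patch). The cluster mask has to be written before min_freq_adjust is enabled, because set_big_min_freq_cluster() rejects changes once the feature is on:

	/* Userspace sketch; the sysfs path and example values are assumptions. */
	#include <stdio.h>

	static int write_param(const char *name, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/module/big_cluster_min_freq_adjust/parameters/%s",
			 name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		write_param("min_freq_cluster", "4-7");    /* example big-cluster CPUs */
		write_param("min_freq_floor", "1094400");  /* kHz, the POLICY_MIN default */
		write_param("min_down_delay_ms", "80");    /* the MIN_DOWN_DELAY_MSEC default */
		return write_param("min_freq_adjust", "1"); /* enable last */
	}
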
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index c9dc547..23fde2e 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -365,6 +365,7 @@
 	res.start = readl_relaxed(dict);
 	res.end = res.start + readl_relaxed(dict + 0x4);
 	res.flags = IORESOURCE_MEM;
+	res.name = NULL;
 	iounmap(dict);
 
 	start_addr = devm_ioremap_resource(&pdev->dev, &res);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b8deec1..c976f17 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1775,6 +1775,18 @@
 			return tx_pkt;
 		}
 	}
+	list_for_each_entry(tx_pkt, &ctx->tx_active, list_node) {
+		if (tx_pkt->riid == riid) {
+			if (tx_pkt->size_remaining) {
+				GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+						__func__, riid);
+				tx_pkt = NULL;
+			}
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return tx_pkt;
+		}
+	}
 	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
 
 	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
@@ -1817,6 +1829,20 @@
 			return;
 		}
 	}
+	list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+				&ctx->tx_active, list_node) {
+		if (tx_pkt == local_tx_pkt) {
+			list_del_init(&tx_pkt->list_done);
+			GLINK_DBG_CH(ctx,
+				"%s: R[%u] Removed Tx packet for intent\n",
+				__func__,
+				tx_pkt->riid);
+			rwref_put(&tx_pkt->pkt_ref);
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return;
+		}
+	}
 	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
 
 	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
@@ -5578,12 +5604,6 @@
 		tx_info = list_first_entry(&ctx->tx_active,
 				struct glink_core_tx_pkt, list_node);
 		rwref_get(&tx_info->pkt_ref);
-
-		spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
-		if (list_empty(&tx_info->list_done))
-			list_add(&tx_info->list_done,
-				 &ctx->tx_pending_remote_done);
-		spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
 		spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
 
 		if (unlikely(tx_info->tracer_pkt)) {
@@ -5648,9 +5668,16 @@
 		txd_len += tx_len;
 		if (!tx_info->size_remaining) {
 			num_pkts++;
+			spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
 			list_del_init(&tx_info->list_node);
+			if (list_empty(&tx_info->list_done))
+				list_add(&tx_info->list_done,
+					&ctx->tx_pending_remote_done);
+			rwref_put(&tx_info->pkt_ref);
+			spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
+		} else {
+			rwref_put(&tx_info->pkt_ref);
 		}
-		rwref_put(&tx_info->pkt_ref);
 	}
 
 	ctx->txd_len += txd_len;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index ca9953a..5640666 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -949,6 +949,12 @@
 		return;
 	}
 
+	if (!einfo->rx_fifo) {
+		if (!get_rx_fifo(einfo))
+			return;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+
 	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
 		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting ?*/
 		tx_wakeup_worker(einfo);
@@ -1554,10 +1560,10 @@
 	struct edge_info *einfo;
 
 	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	einfo->in_ssr = false;
 	if (!einfo->rx_fifo) {
 		if (!get_rx_fifo(einfo))
 			return;
-		einfo->in_ssr = false;
 		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
 	}
 }
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index c254299..b8e9268 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2113,6 +2113,7 @@
 
 	set_bit(ICNSS_WLFW_EXISTS, &penv->state);
 	clear_bit(ICNSS_FW_DOWN, &penv->state);
+	icnss_ignore_qmi_timeout(false);
 
 	penv->wlfw_clnt = qmi_handle_create(icnss_qmi_wlfw_clnt_notify, penv);
 	if (!penv->wlfw_clnt) {
@@ -2450,8 +2451,10 @@
 	int ret = 0;
 	struct icnss_event_pd_service_down_data *event_data = data;
 
-	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+		icnss_ignore_qmi_timeout(false);
 		goto out;
+	}
 
 	if (priv->force_err_fatal)
 		ICNSS_ASSERT(0);
@@ -2475,8 +2478,6 @@
 out:
 	kfree(data);
 
-	icnss_ignore_qmi_timeout(false);
-
 	return ret;
 }
 
@@ -2485,15 +2486,16 @@
 {
 	int ret = 0;
 
-	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+		icnss_ignore_qmi_timeout(false);
 		goto out;
+	}
 
 	priv->early_crash_ind = true;
 	icnss_fw_crashed(priv, NULL);
 
 out:
 	kfree(data);
-	icnss_ignore_qmi_timeout(false);
 
 	return ret;
 }
@@ -3178,7 +3180,8 @@
 	if (!dev)
 		return -ENODEV;
 
-	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+	if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+	    !test_bit(ICNSS_FW_READY, &penv->state)) {
 		icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
 			     penv->state);
 		return -EINVAL;
@@ -3277,7 +3280,8 @@
 	if (!dev)
 		return -ENODEV;
 
-	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+	if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+	    !test_bit(ICNSS_FW_READY, &penv->state)) {
 		icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
 			     penv->state);
 		return -EINVAL;
diff --git a/drivers/soc/qcom/idle.h b/drivers/soc/qcom/idle.h
new file mode 100644
index 0000000..2f852c3
--- /dev/null
+++ b/drivers/soc/qcom/idle.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2007-2009,2012-2014, 2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
+#define _ARCH_ARM_MACH_MSM_IDLE_H_
+
+#define MAX_CPUS_PER_CLUSTER	4
+#define MAX_NUM_CLUSTER	4
+
+#ifndef __ASSEMBLY__
+#if defined(CONFIG_CPU_V7) || defined(CONFIG_ARM64)
+extern unsigned long msm_pm_boot_vector[MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER];
+void msm_pm_boot_entry(void);
+#else
+static inline void msm_pm_boot_entry(void) {}
+#endif
+#endif
+#endif
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index 3f4b8bc..23cbb7b 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -345,7 +345,7 @@
 						       TRCCNTVRn(j));
 		}
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j));
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
@@ -448,7 +448,7 @@
 			etm_writel(etmdata, etmdata->state[i++], TRCCNTVRn(j));
 		}
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
@@ -932,7 +932,7 @@
 		for (j = 0; j < etmdata->nr_cntr; j++)
 			i = etm_read_crxr(etmdata->state, i, j);
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			i = etm_read_rsxr(etmdata->state, i, j + 2);
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
@@ -1387,7 +1387,7 @@
 		for (j = 0; j < etmdata->nr_cntr; j++)
 			i = etm_write_crxr(etmdata->state, i, j);
 		/* resource selection registers */
-		for (j = 0; j < etmdata->nr_resource; j++)
+		for (j = 0; j < etmdata->nr_resource * 2; j++)
 			i = etm_write_rsxr(etmdata->state, i, j + 2);
 		/* comparator registers */
 		for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
@@ -1496,7 +1496,7 @@
 	val = etm_readl(etmdata, TRCIDR4);
 	etmdata->nr_addr_cmp = BMVAL(val, 0, 3);
 	etmdata->nr_data_cmp = BMVAL(val, 4, 7);
-	etmdata->nr_resource = BMVAL(val, 16, 19);
+	etmdata->nr_resource = BMVAL(val, 16, 19) + 1;
 	etmdata->nr_ss_cmp = BMVAL(val, 20, 23);
 	etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27);
 	etmdata->nr_vmid_cmp = BMVAL(val, 28, 31);
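
For context on the arithmetic in this hunk: the probe now adds one to the TRCIDR4[19:16] field and the save/restore paths walk twice that many TRCRSCTLRn registers, consistent with the field counting resource-selector pairs. A small worked example of the sizing, using the patch's own formula and an assumed register value:

	/* Worked example of the new TRCRSCTLRn sizing; trcidr4 value is assumed. */
	#include <stdio.h>

	#define BMVAL(val, lsb, msb) \
		(((val) >> (lsb)) & ((1u << ((msb) - (lsb) + 1)) - 1))

	int main(void)
	{
		unsigned int trcidr4 = 0x00070000;	/* bits [19:16] = 7 */
		unsigned int nr_resource = BMVAL(trcidr4, 16, 19) + 1;	/* 8 pairs */
		unsigned int nr_rsctlr = nr_resource * 2;		/* 16 registers */

		printf("pairs=%u TRCRSCTLRn saved/restored=%u\n",
		       nr_resource, nr_rsctlr);
		return 0;
	}
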
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 696c043..6542861 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -406,6 +406,7 @@
 				memblock[i].peripheral ==
 				DHMS_MEM_PROC_MPSS_V01 &&
 				!memblock[i].guarantee &&
+				!memblock[i].client_request &&
 				memblock[i].allotted &&
 				!memblock[i].alloc_request) {
 				pr_debug("memshare: hypervisor unmapping  for client id: %d\n",
@@ -665,9 +666,10 @@
 					__func__);
 		flag = 1;
 	} else if (!memblock[client_id].guarantee &&
-					memblock[client_id].allotted) {
-		pr_debug("memshare: %s: size: %d",
-				__func__, memblock[client_id].size);
+				!memblock[client_id].client_request &&
+				memblock[client_id].allotted) {
+		pr_debug("memshare: %s:client_id:%d - size: %d",
+				__func__, client_id, memblock[client_id].size);
 		ret = hyp_assign_phys(memblock[client_id].phy_addr,
 				memblock[client_id].size, source_vmlist, 1,
 				dest_vmids, dest_perms, 1);
@@ -676,8 +678,8 @@
 		 * This is an error case as hyp mapping was successful
 		 * earlier but during unmap it lead to failure.
 		 */
-			pr_err("memshare: %s, failed to unmap the region\n",
-				__func__);
+			pr_err("memshare: %s, failed to unmap the region for client id:%d\n",
+				__func__, client_id);
 		}
 		size = memblock[client_id].size;
 		if (memblock[client_id].client_id == 1) {
@@ -696,8 +698,8 @@
 			attrs);
 		free_client(client_id);
 	} else {
-		pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n",
-						__func__);
+		pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n",
+						__func__, client_id);
 	}
 
 	if (flag) {
@@ -992,6 +994,10 @@
 							pdev->dev.of_node,
 							"qcom,allocate-boot-time");
 
+	memblock[num_clients].client_request = of_property_read_bool(
+							pdev->dev.of_node,
+							"qcom,allocate-on-request");
+
 	rc = of_property_read_string(pdev->dev.of_node, "label",
 						&name);
 	if (rc) {
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
index 6b54652..908f091 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.h
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -41,6 +41,8 @@
 	uint32_t allotted;
 	/* Memory allocation request received or not */
 	uint32_t alloc_request;
+	/* Allocation on request from a client */
+	uint32_t client_request;
 	/* Size required for client */
 	uint32_t size;
 	/*
diff --git a/drivers/soc/qcom/msm-pm.c b/drivers/soc/qcom/msm-pm.c
new file mode 100644
index 0000000..129ebce
--- /dev/null
+++ b/drivers/soc/qcom/msm-pm.c
@@ -0,0 +1,921 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/smp.h>
+#include <linux/tick.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/msm-bus.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif
+#include <soc/qcom/jtag.h>
+#include "pm-boot.h"
+#include "idle.h"
+
+#define SCM_CMD_TERMINATE_PC	(0x2)
+#define SCM_CMD_CORE_HOTPLUGGED (0x10)
+#define SCM_FLUSH_FLAG_MASK	(0x3)
+
+#define SCLK_HZ (32768)
+
+#define MAX_BUF_SIZE  1024
+
+static int msm_pm_debug_mask = 1;
+module_param_named(
+	debug_mask, msm_pm_debug_mask, int, 0664
+);
+
+enum {
+	MSM_PM_DEBUG_SUSPEND = BIT(0),
+	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
+	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
+	MSM_PM_DEBUG_CLOCK = BIT(3),
+	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
+	MSM_PM_DEBUG_IDLE = BIT(5),
+	MSM_PM_DEBUG_IDLE_LIMITS = BIT(6),
+	MSM_PM_DEBUG_HOTPLUG = BIT(7),
+};
+
+enum msm_pc_count_offsets {
+	MSM_PC_ENTRY_COUNTER,
+	MSM_PC_EXIT_COUNTER,
+	MSM_PC_FALLTHRU_COUNTER,
+	MSM_PC_UNUSED,
+	MSM_PC_NUM_COUNTERS,
+};
+
+static bool msm_pm_ldo_retention_enabled = true;
+static bool msm_pm_tz_flushes_cache;
+static bool msm_pm_ret_no_pll_switch;
+static bool msm_no_ramp_down_pc;
+static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+static DEFINE_PER_CPU(struct clk *, cpu_clks);
+static struct clk *l2_clk;
+
+static long *msm_pc_debug_counters;
+
+static cpumask_t retention_cpus;
+static DEFINE_SPINLOCK(retention_lock);
+static DEFINE_MUTEX(msm_pc_debug_mutex);
+
+static bool msm_pm_is_L1_writeback(void)
+{
+	u32 cache_id = 0;
+
+#if defined(CONFIG_CPU_V7)
+	u32 sel = 0;
+
+	asm volatile ("mcr p15, 2, %[ccselr], c0, c0, 0\n\t"
+		      "isb\n\t"
+		      "mrc p15, 1, %[ccsidr], c0, c0, 0\n\t"
+		      :[ccsidr]"=r" (cache_id)
+		      :[ccselr]"r" (sel)
+		     );
+	return cache_id & BIT(30);
+#elif defined(CONFIG_ARM64)
+	u32 sel = 0;
+
+	asm volatile("msr csselr_el1, %[ccselr]\n\t"
+		     "isb\n\t"
+		     "mrs %[ccsidr],ccsidr_el1\n\t"
+		     :[ccsidr]"=r" (cache_id)
+		     :[ccselr]"r" (sel)
+		    );
+	return cache_id & BIT(30);
+#else
+#error No valid CPU arch selected
+#endif
+}
+
+static bool msm_pm_swfi(bool from_idle)
+{
+	msm_arch_idle();
+	return true;
+}
+
+static bool msm_pm_retention(bool from_idle)
+{
+	int ret = 0;
+	unsigned int cpu = smp_processor_id();
+	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+
+	spin_lock(&retention_lock);
+
+	if (!msm_pm_ldo_retention_enabled)
+		goto bailout;
+
+	cpumask_set_cpu(cpu, &retention_cpus);
+	spin_unlock(&retention_lock);
+
+	if (!msm_pm_ret_no_pll_switch)
+		clk_disable(cpu_clk);
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_RETENTION, false);
+	WARN_ON(ret);
+
+	msm_arch_idle();
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+	WARN_ON(ret);
+
+	if (!msm_pm_ret_no_pll_switch)
+		if (clk_enable(cpu_clk))
+			pr_err("%s(): Error restore cpu clk\n", __func__);
+
+	spin_lock(&retention_lock);
+	cpumask_clear_cpu(cpu, &retention_cpus);
+bailout:
+	spin_unlock(&retention_lock);
+	return true;
+}
+
+static inline void msm_pc_inc_debug_count(uint32_t cpu,
+		enum msm_pc_count_offsets offset)
+{
+	int cntr_offset;
+	uint32_t cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+
+	if (cluster_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
+		WARN_ON(cpu);
+
+	cntr_offset = (cluster_id * MAX_CPUS_PER_CLUSTER * MSM_PC_NUM_COUNTERS)
+			 + (cpu_id * MSM_PC_NUM_COUNTERS) + offset;
+
+	if (!msm_pc_debug_counters)
+		return;
+
+	msm_pc_debug_counters[cntr_offset]++;
+}
+
+static bool msm_pm_pc_hotplug(void)
+{
+	uint32_t cpu = smp_processor_id();
+	enum msm_pm_l2_scm_flag flag;
+	struct scm_desc desc;
+
+	flag = lpm_cpu_pre_pc_cb(cpu);
+
+	if (!msm_pm_tz_flushes_cache) {
+		if (flag == MSM_SCM_L2_OFF)
+			flush_cache_all();
+		else if (msm_pm_is_L1_writeback())
+			flush_cache_louis();
+	}
+
+	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+	if (is_scm_armv8()) {
+		desc.args[0] = SCM_CMD_CORE_HOTPLUGGED |
+			       (flag & SCM_FLUSH_FLAG_MASK);
+		desc.arginfo = SCM_ARGS(1);
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+				 SCM_CMD_TERMINATE_PC), &desc);
+	} else {
+		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
+		SCM_CMD_CORE_HOTPLUGGED | (flag & SCM_FLUSH_FLAG_MASK));
+	}
+
+	/* Should not return here */
+	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+	return 0;
+}
+
+static bool msm_pm_fastpc(bool from_idle)
+{
+	int ret = 0;
+	unsigned int cpu = smp_processor_id();
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_FASTPC, false);
+	WARN_ON(ret);
+
+	if (from_idle || cpu_online(cpu))
+		msm_arch_idle();
+	else
+		msm_pm_pc_hotplug();
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+	WARN_ON(ret);
+
+	return true;
+}
+
+int msm_pm_collapse(unsigned long unused)
+{
+	uint32_t cpu = smp_processor_id();
+	enum msm_pm_l2_scm_flag flag;
+	struct scm_desc desc;
+
+	flag = lpm_cpu_pre_pc_cb(cpu);
+
+	if (!msm_pm_tz_flushes_cache) {
+		if (flag == MSM_SCM_L2_OFF)
+			flush_cache_all();
+		else if (msm_pm_is_L1_writeback())
+			flush_cache_louis();
+	}
+	msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+	if (is_scm_armv8()) {
+		desc.args[0] = flag;
+		desc.arginfo = SCM_ARGS(1);
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+				 SCM_CMD_TERMINATE_PC), &desc);
+	} else {
+		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
+	}
+
+	msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_pm_collapse);
+
+static bool __ref msm_pm_spm_power_collapse(
+	unsigned int cpu, int mode, bool from_idle, bool notify_rpm)
+{
+	void *entry;
+	bool collapsed = 0;
+	int ret;
+	bool save_cpu_regs = (cpu_online(cpu) || from_idle);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: notify_rpm %d\n",
+			cpu, __func__, (int) notify_rpm);
+
+	ret = msm_spm_set_low_power_mode(mode, notify_rpm);
+	WARN_ON(ret);
+
+	entry = save_cpu_regs ?  cpu_resume : msm_secondary_startup;
+
+	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));
+
+	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: program vector to %pk\n",
+			cpu, __func__, entry);
+
+	msm_jtag_save_state();
+
+	collapsed = save_cpu_regs ?
+		!cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
+
+	msm_jtag_restore_state();
+
+	if (collapsed)
+		local_fiq_enable();
+
+	msm_pm_boot_config_after_pc(cpu);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
+			cpu, __func__, collapsed);
+
+	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+	WARN_ON(ret);
+	return collapsed;
+}
+
+static bool msm_pm_power_collapse_standalone(
+		bool from_idle)
+{
+	unsigned int cpu = smp_processor_id();
+	bool collapsed;
+
+	collapsed = msm_pm_spm_power_collapse(cpu,
+			MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+			from_idle, false);
+
+	return collapsed;
+}
+
+static int ramp_down_last_cpu(int cpu)
+{
+	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+	int ret = 0;
+
+	clk_disable(cpu_clk);
+	clk_disable(l2_clk);
+
+	return ret;
+}
+
+static int ramp_up_first_cpu(int cpu, int saved_rate)
+{
+	struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+	int rc = 0;
+
+	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: restore clock rate\n",
+				cpu, __func__);
+
+	clk_enable(l2_clk);
+
+	if (cpu_clk) {
+		int ret = clk_enable(cpu_clk);
+
+		if (ret) {
+			pr_err("%s(): Error restoring cpu clk\n",
+					__func__);
+			return ret;
+		}
+	}
+
+	return rc;
+}
+
+static bool msm_pm_power_collapse(bool from_idle)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long saved_acpuclk_rate = 0;
+	bool collapsed;
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: idle %d\n",
+			cpu, __func__, (int)from_idle);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);
+
+	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+		saved_acpuclk_rate = ramp_down_last_cpu(cpu);
+
+	collapsed = msm_pm_spm_power_collapse(cpu, MSM_SPM_MODE_POWER_COLLAPSE,
+			from_idle, true);
+
+	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+		ramp_up_first_cpu(cpu, saved_acpuclk_rate);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: post power up\n", cpu, __func__);
+
+	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+		pr_info("CPU%u: %s: return\n", cpu, __func__);
+	return collapsed;
+}
+/******************************************************************************
+ * External Idle/Suspend Functions
+ *****************************************************************************/
+
+static void arch_idle(void) {}
+
+static bool (*execute[MSM_PM_SLEEP_MODE_NR])(bool idle) = {
+	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = msm_pm_swfi,
+	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+		msm_pm_power_collapse_standalone,
+	[MSM_PM_SLEEP_MODE_RETENTION] = msm_pm_retention,
+	[MSM_PM_SLEEP_MODE_FASTPC] = msm_pm_fastpc,
+	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = msm_pm_power_collapse,
+};
+
+/**
+ * msm_cpu_pm_enter_sleep(): Enter a low power mode on current cpu
+ *
+ * @mode: sleep mode to enter
+ * @from_idle: true if the mode is entered from the idle path, false if it
+ *             is entered from suspend
+ *
+ * Return: exit status of the executed low power mode (false if the mode
+ * was not executed).
+ *
+ * Must be called with interrupts disabled, on the core that is to enter
+ * the low power mode.
+ *
+ */
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle)
+{
+	bool exit_stat = false;
+	unsigned int cpu = smp_processor_id();
+
+	if ((!from_idle  && cpu_online(cpu))
+			|| (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask))
+		pr_info("CPU%u:%s mode:%d during %s\n", cpu, __func__,
+				mode, from_idle ? "idle" : "suspend");
+
+	if (execute[mode])
+		exit_stat = execute[mode](from_idle);
+
+	return exit_stat;
+}
+
+/**
+ * msm_pm_wait_cpu_shutdown() - Wait for a core to be power collapsed during
+ *				hotplug
+ *
+ * @cpu: cpu to wait on.
+ *
+ * Blocking call that waits for the core to be power collapsed. It is
+ * called from platform_cpu_die to ensure that the core is power collapsed
+ * before the CPU_DEAD notification is sent, so that drivers can remove
+ * their resource votes (regulator and clock) for this CPU.
+ */
+int msm_pm_wait_cpu_shutdown(unsigned int cpu)
+{
+	int timeout = 0;
+
+	if (!msm_pm_slp_sts)
+		return 0;
+	if (!msm_pm_slp_sts[cpu].base_addr)
+		return 0;
+	while (1) {
+		/*
+		 * Check whether the SPM of the core being hotplugged has
+		 * set its sleep state. The SPM sleep state indicates that
+		 * the core has been power collapsed.
+		 */
+		int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);
+
+		if (acc_sts & msm_pm_slp_sts[cpu].mask)
+			return 0;
+
+		udelay(100);
+		/*
+		 * Dump spm registers for debugging
+		 */
+		if (++timeout == 20) {
+			msm_spm_dump_regs(cpu);
+			__WARN_printf(
+			"CPU%u didn't collapse in 2ms, sleep status: 0x%x\n",
+								cpu, acc_sts);
+		}
+	}
+
+	return -EBUSY;
+}
+
+static void msm_pm_ack_retention_disable(void *data)
+{
+	/*
+	 * This is an empty function; it only ensures that the core has
+	 * woken up and that it is then safe to disable retention.
+	 */
+}
+/**
+ * msm_pm_enable_retention() - Disable/Enable retention on all cores
+ * @enable: Enable/Disable retention
+ *
+ */
+void msm_pm_enable_retention(bool enable)
+{
+	if (enable == msm_pm_ldo_retention_enabled)
+		return;
+
+	msm_pm_ldo_retention_enabled = enable;
+
+	/*
+	 * If retention is being disabled, wake up all online cores to ensure
+	 * that none of them is executing retention. Offlined cores need not
+	 * be woken up, as they enter the deepest sleep mode, namely RPM
+	 * assisted power collapse.
+	 */
+	if (!enable) {
+		preempt_disable();
+		smp_call_function_many(&retention_cpus,
+				msm_pm_ack_retention_disable,
+				NULL, true);
+		preempt_enable();
+	}
+}
+EXPORT_SYMBOL(msm_pm_enable_retention);
+
+/**
+ * msm_pm_retention_enabled() - Check if retention is enabled
+ *
+ * returns true if retention is enabled
+ */
+bool msm_pm_retention_enabled(void)
+{
+	return msm_pm_ldo_retention_enabled;
+}
+EXPORT_SYMBOL(msm_pm_retention_enabled);
+
+static int msm_pm_snoc_client_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
+	static uint32_t msm_pm_bus_client;
+
+	msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (msm_pm_bus_pdata) {
+		msm_pm_bus_client =
+			msm_bus_scale_register_client(msm_pm_bus_pdata);
+
+		if (!msm_pm_bus_client) {
+			pr_err("%s: Failed to register SNOC client", __func__);
+			rc = -ENXIO;
+			goto snoc_cl_probe_done;
+		}
+
+		rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);
+
+		if (rc)
+			pr_err("%s: Error setting bus rate", __func__);
+	}
+
+snoc_cl_probe_done:
+	return rc;
+}
+
+static int msm_cpu_status_probe(struct platform_device *pdev)
+{
+	u32 cpu;
+	int rc;
+
+	if (!pdev || !pdev->dev.of_node)
+		return -EFAULT;
+
+	msm_pm_slp_sts = devm_kzalloc(&pdev->dev,
+			sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
+			GFP_KERNEL);
+
+	if (!msm_pm_slp_sts)
+		return -ENOMEM;
+
+
+	for_each_possible_cpu(cpu) {
+		struct device_node *cpun, *node;
+		char *key;
+
+		cpun = of_get_cpu_node(cpu, NULL);
+
+		if (!cpun) {
+			__WARN();
+			continue;
+		}
+
+		node = of_parse_phandle(cpun, "qcom,sleep-status", 0);
+		if (!node)
+			return -ENODEV;
+
+		msm_pm_slp_sts[cpu].base_addr = of_iomap(node, 0);
+		if (!msm_pm_slp_sts[cpu].base_addr) {
+			pr_err("%s: Can't find base addr\n", __func__);
+			return -ENODEV;
+		}
+
+		key = "qcom,sleep-status-mask";
+		rc = of_property_read_u32(node, key, &msm_pm_slp_sts[cpu].mask);
+		if (rc) {
+			pr_err("%s: Can't find %s property\n", __func__, key);
+			iounmap(msm_pm_slp_sts[cpu].base_addr);
+			return rc;
+		}
+	}
+
+	return 0;
+};
+
+static const struct of_device_id msm_slp_sts_match_tbl[] = {
+	{.compatible = "qcom,cpu-sleep-status"},
+	{},
+};
+
+static struct platform_driver msm_cpu_status_driver = {
+	.probe = msm_cpu_status_probe,
+	.driver = {
+		.name = "cpu_slp_status",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_slp_sts_match_tbl,
+	},
+};
+
+static const struct of_device_id msm_snoc_clnt_match_tbl[] = {
+	{.compatible = "qcom,pm-snoc-client"},
+	{},
+};
+
+static struct platform_driver msm_cpu_pm_snoc_client_driver = {
+	.probe = msm_pm_snoc_client_probe,
+	.driver = {
+		.name = "pm_snoc_client",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_snoc_clnt_match_tbl,
+	},
+};
+
+struct msm_pc_debug_counters_buffer {
+	long *reg;
+	u32 len;
+	char buf[MAX_BUF_SIZE];
+};
+
+static char *counter_name[MSM_PC_NUM_COUNTERS] = {
+		"PC Entry Counter",
+		"Warmboot Entry Counter",
+		"PC Bailout Counter"
+};
+
+static int msm_pc_debug_counters_copy(
+		struct msm_pc_debug_counters_buffer *data)
+{
+	int j;
+	u32 stat;
+	unsigned int cpu;
+	unsigned int len;
+	uint32_t cluster_id;
+	uint32_t cpu_id;
+	uint32_t offset;
+
+	for_each_possible_cpu(cpu) {
+		len = scnprintf(data->buf + data->len,
+				sizeof(data->buf)-data->len,
+				"CPU%d\n", cpu);
+		cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+		cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+		offset = (cluster_id * MAX_CPUS_PER_CLUSTER
+				 * MSM_PC_NUM_COUNTERS)
+				 + (cpu_id * MSM_PC_NUM_COUNTERS);
+
+		data->len += len;
+
+		for (j = 0; j < MSM_PC_NUM_COUNTERS - 1; j++) {
+			stat = data->reg[offset + j];
+			len = scnprintf(data->buf + data->len,
+					 sizeof(data->buf) - data->len,
+					"\t%s: %d", counter_name[j], stat);
+
+			data->len += len;
+		}
+		len = scnprintf(data->buf + data->len,
+			 sizeof(data->buf) - data->len,
+			"\n");
+
+		data->len += len;
+	}
+
+	return data->len;
+}
+
+static ssize_t msm_pc_debug_counters_file_read(struct file *file,
+		char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_pc_debug_counters_buffer *data;
+	ssize_t ret;
+
+	mutex_lock(&msm_pc_debug_mutex);
+	data = file->private_data;
+
+	if (!data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!bufu) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!access_ok(VERIFY_WRITE, bufu, count)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	if (*ppos >= data->len && data->len == 0)
+		data->len = msm_pc_debug_counters_copy(data);
+
+	ret = simple_read_from_buffer(bufu, count, ppos,
+			data->buf, data->len);
+exit:
+	mutex_unlock(&msm_pc_debug_mutex);
+	return ret;
+}
+
+static int msm_pc_debug_counters_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_pc_debug_counters_buffer *buf;
+	int ret = 0;
+
+	mutex_lock(&msm_pc_debug_mutex);
+
+	if (!inode->i_private) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	file->private_data = kzalloc(
+		sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);
+
+	if (!file->private_data) {
+		pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
+		__func__, sizeof(struct msm_pc_debug_counters_buffer));
+
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	buf = file->private_data;
+	buf->reg = (long *)inode->i_private;
+
+exit:
+	mutex_unlock(&msm_pc_debug_mutex);
+	return ret;
+}
+
+static int msm_pc_debug_counters_file_close(struct inode *inode,
+		struct file *file)
+{
+	mutex_lock(&msm_pc_debug_mutex);
+	kfree(file->private_data);
+	mutex_unlock(&msm_pc_debug_mutex);
+	return 0;
+}
+
+static const struct file_operations msm_pc_debug_counters_fops = {
+	.open = msm_pc_debug_counters_file_open,
+	.read = msm_pc_debug_counters_file_read,
+	.release = msm_pc_debug_counters_file_close,
+	.llseek = no_llseek,
+};
+
+static int msm_pm_clk_init(struct platform_device *pdev)
+{
+	bool synced_clocks;
+	u32 cpu;
+	char clk_name[] = "cpu??_clk";
+	char *key;
+
+	key = "qcom,saw-turns-off-pll";
+	if (of_property_read_bool(pdev->dev.of_node, key))
+		return 0;
+
+	key = "qcom,synced-clocks";
+	synced_clocks = of_property_read_bool(pdev->dev.of_node, key);
+
+	for_each_possible_cpu(cpu) {
+		struct clk *clk;
+
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(clk)) {
+			if (cpu && synced_clocks)
+				return 0;
+			clk = NULL;
+		}
+		per_cpu(cpu_clks, cpu) = clk;
+	}
+
+	if (synced_clocks)
+		return 0;
+
+	l2_clk = clk_get(&pdev->dev, "l2_clk");
+	if (IS_ERR(l2_clk))
+		pr_warn("%s: Could not get l2_clk (-%ld)\n", __func__,
+			PTR_ERR(l2_clk));
+
+	return 0;
+}
+
+static int msm_cpu_pm_probe(struct platform_device *pdev)
+{
+	struct dentry *dent = NULL;
+	struct resource *res = NULL;
+	int ret = 0;
+	void __iomem *msm_pc_debug_counters_imem;
+	char *key;
+	int alloc_size = (MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER
+					* MSM_PC_NUM_COUNTERS
+					* sizeof(*msm_pc_debug_counters));
+
+	msm_pc_debug_counters = dma_alloc_coherent(&pdev->dev, alloc_size,
+				&msm_pc_debug_counters_phys, GFP_KERNEL);
+
+	if (msm_pc_debug_counters) {
+		memset(msm_pc_debug_counters, 0, alloc_size);
+		dent = debugfs_create_file("pc_debug_counter", 0444, NULL,
+				msm_pc_debug_counters,
+				&msm_pc_debug_counters_fops);
+		if (!dent)
+			pr_err("%s: ERROR debugfs_create_file failed\n",
+					__func__);
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res)
+			goto skip_save_imem;
+		msm_pc_debug_counters_imem = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+		if (msm_pc_debug_counters_imem) {
+			writel_relaxed(msm_pc_debug_counters_phys,
+					msm_pc_debug_counters_imem);
+			/* memory barrier */
+			mb();
+			devm_iounmap(&pdev->dev,
+					msm_pc_debug_counters_imem);
+		}
+	} else {
+		msm_pc_debug_counters = NULL;
+		msm_pc_debug_counters_phys = 0;
+	}
+skip_save_imem:
+	if (pdev->dev.of_node) {
+		key = "qcom,tz-flushes-cache";
+		msm_pm_tz_flushes_cache =
+				of_property_read_bool(pdev->dev.of_node, key);
+
+		key = "qcom,no-pll-switch-for-retention";
+		msm_pm_ret_no_pll_switch =
+				of_property_read_bool(pdev->dev.of_node, key);
+
+		ret = msm_pm_clk_init(pdev);
+		if (ret) {
+			pr_info("msm_pm_clk_init returned error\n");
+			return ret;
+		}
+	}
+
+	if (pdev->dev.of_node)
+		of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	return ret;
+}
+
+static const struct of_device_id msm_cpu_pm_table[] = {
+	{.compatible = "qcom,pm"},
+	{},
+};
+
+static struct platform_driver msm_cpu_pm_driver = {
+	.probe = msm_cpu_pm_probe,
+	.driver = {
+		.name = "msm-pm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cpu_pm_table,
+	},
+};
+
+static int __init msm_pm_drv_init(void)
+{
+	int rc;
+
+	cpumask_clear(&retention_cpus);
+
+	rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);
+
+	if (rc)
+		pr_err("%s(): failed to register driver %s\n", __func__,
+				msm_cpu_pm_snoc_client_driver.driver.name);
+	return rc;
+}
+late_initcall(msm_pm_drv_init);
+
+static int __init msm_pm_debug_counters_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_cpu_pm_driver);
+
+	if (rc)
+		pr_err("%s(): failed to register driver %s\n", __func__,
+				msm_cpu_pm_driver.driver.name);
+	return rc;
+}
+fs_initcall(msm_pm_debug_counters_init);
+
+int __init msm_pm_sleep_status_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_cpu_status_driver);
+}
+arch_initcall(msm_pm_sleep_status_init);
+
+#ifdef CONFIG_ARM
+static int idle_initialize(void)
+{
+	arm_pm_idle = arch_idle;
+	return 0;
+}
+early_initcall(idle_initialize);
+#endif
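
The pc_debug_counter buffer allocated in msm_cpu_pm_probe() is one flat array of MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER * MSM_PC_NUM_COUNTERS longs, so both msm_pc_inc_debug_count() and the debugfs reader index it with the same offset formula. A worked example with assumed cluster/cpu values:

	/* Slot arithmetic for the flat pc_debug_counter array (values assumed). */
	#include <stdio.h>

	#define MAX_CPUS_PER_CLUSTER	4
	#define MSM_PC_NUM_COUNTERS	4	/* entry, exit, fallthru, unused */

	int main(void)
	{
		unsigned int cluster_id = 1, cpu_id = 2;	/* example core */
		unsigned int counter = 0;			/* MSM_PC_ENTRY_COUNTER */
		unsigned int offset =
			cluster_id * MAX_CPUS_PER_CLUSTER * MSM_PC_NUM_COUNTERS +
			cpu_id * MSM_PC_NUM_COUNTERS + counter;

		printf("counter slot = %u\n", offset);	/* 1*4*4 + 2*4 + 0 = 24 */
		return 0;
	}
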
diff --git a/drivers/soc/qcom/pil_bg_intf.h b/drivers/soc/qcom/pil_bg_intf.h
index 722024b..46aed25 100644
--- a/drivers/soc/qcom/pil_bg_intf.h
+++ b/drivers/soc/qcom/pil_bg_intf.h
@@ -36,7 +36,7 @@
 __packed struct tzapp_bg_rsp {
 	uint32_t tzapp_bg_cmd;
 	uint32_t bg_info_len;
-	uint32_t status;
+	int32_t status;
 	uint32_t bg_info[100];
 };
 
diff --git a/drivers/soc/qcom/pm-boot.c b/drivers/soc/qcom/pm-boot.c
new file mode 100644
index 0000000..f0daeba
--- /dev/null
+++ b/drivers/soc/qcom/pm-boot.c
@@ -0,0 +1,98 @@
+/* Copyright (c) 2011-2014, 2016, 2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+#include "pm-boot.h"
+#include "idle.h"
+
+#define CPU_INDEX(cluster, cpu) (cluster * MAX_CPUS_PER_CLUSTER + cpu)
+
+static void (*msm_pm_boot_before_pc)(unsigned int cpu, unsigned long entry);
+static void (*msm_pm_boot_after_pc)(unsigned int cpu);
+
+static int msm_pm_tz_boot_init(void)
+{
+	int ret;
+	phys_addr_t warmboot_addr = virt_to_phys(msm_pm_boot_entry);
+
+	if (scm_is_mc_boot_available())
+		ret = scm_set_warm_boot_addr_mc_for_all(warmboot_addr);
+	else {
+		unsigned int flag = 0;
+
+		if (num_possible_cpus() == 1)
+			flag = SCM_FLAG_WARMBOOT_CPU0;
+		else if (num_possible_cpus() == 2)
+			flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1;
+		else if (num_possible_cpus() == 4)
+			flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1 |
+				SCM_FLAG_WARMBOOT_CPU2 | SCM_FLAG_WARMBOOT_CPU3;
+		else
+			pr_warn("%s: set warmboot address failed\n",
+								__func__);
+
+		ret = scm_set_boot_addr(virt_to_phys(msm_pm_boot_entry), flag);
+	}
+	return ret;
+}
+static void msm_pm_write_boot_vector(unsigned int cpu, unsigned long address)
+{
+	uint32_t clust_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+	uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+	unsigned long *start_address;
+	unsigned long *end_address;
+
+	if (clust_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
+		WARN_ON(cpu);
+
+	msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)] = address;
+	start_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)];
+	end_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id + 1)];
+	dmac_clean_range((void *)start_address, (void *)end_address);
+}
+
+static void msm_pm_config_tz_before_pc(unsigned int cpu,
+		unsigned long entry)
+{
+	msm_pm_write_boot_vector(cpu, entry);
+}
+
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry)
+{
+	if (msm_pm_boot_before_pc)
+		msm_pm_boot_before_pc(cpu, entry);
+}
+
+void msm_pm_boot_config_after_pc(unsigned int cpu)
+{
+	if (msm_pm_boot_after_pc)
+		msm_pm_boot_after_pc(cpu);
+}
+
+static int __init msm_pm_boot_init(void)
+{
+	int ret = 0;
+
+	ret = msm_pm_tz_boot_init();
+	msm_pm_boot_before_pc = msm_pm_config_tz_before_pc;
+	msm_pm_boot_after_pc = NULL;
+
+	return ret;
+}
+postcore_initcall(msm_pm_boot_init);
diff --git a/drivers/soc/qcom/pm-boot.h b/drivers/soc/qcom/pm-boot.h
new file mode 100644
index 0000000..7ec053a
--- /dev/null
+++ b/drivers/soc/qcom/pm-boot.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_PM_BOOT_H
+#define _ARCH_ARM_MACH_MSM_PM_BOOT_H
+
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry);
+void msm_pm_boot_config_after_pc(unsigned int cpu);
+
+#endif
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 8668155..fb70a07 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -245,7 +245,7 @@
 
 	switch (event) {
 	case USB_QDSS_CONNECT:
-		usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0);
+		usb_qdss_alloc_req(ch, poolsize, 0);
 		mhi_queue_read(drvdata);
 		break;
 
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index e02bf84..fffef10 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -593,12 +593,18 @@
 	/* QCS605 ID */
 	[347] = {MSM_CPU_QCS605, "QCS605"},
 
+	/* SXR1130 ID */
+	[371] = {MSM_CPU_SXR1130, "SXR1130"},
+
 	/* SDA670 ID */
 	[337] = {MSM_CPU_SDA670, "SDA670"},
 
 	/* SDM710 ID */
 	[360] = {MSM_CPU_SDM710, "SDM710"},
 
+	/* SXR1120 ID */
+	[370] = {MSM_CPU_SXR1120, "SXR1120"},
+
 	/* 8953 ID */
 	[293] = {MSM_CPU_8953, "MSM8953"},
 	[304] = {MSM_CPU_8953, "APQ8053"},
@@ -615,6 +621,12 @@
 	[294] = {MSM_CPU_8937, "MSM8937"},
 	[295] = {MSM_CPU_8937, "APQ8937"},
 
+	/* MSM8917 IDs */
+	[303] = {MSM_CPU_8917, "MSM8917"},
+	[307] = {MSM_CPU_8917, "APQ8017"},
+	[308] = {MSM_CPU_8917, "MSM8217"},
+	[309] = {MSM_CPU_8917, "MSM8617"},
+
 	/* SDM429 and SDM439 ID*/
 	[353] = {MSM_CPU_SDM439, "SDM439"},
 	[354] = {MSM_CPU_SDM429, "SDM429"},
@@ -1532,6 +1544,10 @@
 		dummy_socinfo.id = 336;
 		strlcpy(dummy_socinfo.build_id, "sdm670 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sxr1130()) {
+		dummy_socinfo.id = 371;
+		strlcpy(dummy_socinfo.build_id, "sxr1130 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdm710()) {
 		dummy_socinfo.id = 360;
 		strlcpy(dummy_socinfo.build_id, "sdm710 - ",
@@ -1544,6 +1560,10 @@
 		dummy_socinfo.id = 347;
 		strlcpy(dummy_socinfo.build_id, "qcs605 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sxr1120()) {
+		dummy_socinfo.id = 370;
+		strlcpy(dummy_socinfo.build_id, "sxr1120 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_mdm9650()) {
 		dummy_socinfo.id = 286;
 		strlcpy(dummy_socinfo.build_id, "mdm9650 - ",
@@ -1560,6 +1580,10 @@
 		dummy_socinfo.id = 294;
 		strlcpy(dummy_socinfo.build_id, "msm8937 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8917()) {
+		dummy_socinfo.id = 303;
+		strlcpy(dummy_socinfo.build_id, "msm8917 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdm450()) {
 		dummy_socinfo.id = 338;
 		strlcpy(dummy_socinfo.build_id, "sdm450 - ",
diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c
index 5f1ac4e..268a8fe 100644
--- a/drivers/soc/qcom/spm_devices.c
+++ b/drivers/soc/qcom/spm_devices.c
@@ -234,7 +234,7 @@
 }
 
 static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
-		unsigned int mode, bool notify_rpm)
+		unsigned int mode, bool notify_rpm, bool set_spm_enable)
 {
 	uint32_t i;
 	int ret = -EINVAL;
@@ -251,9 +251,11 @@
 	if (!dev->num_modes)
 		return 0;
 
-	if (mode == MSM_SPM_MODE_DISABLED) {
+	if (mode == MSM_SPM_MODE_DISABLED && set_spm_enable) {
 		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
-	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
+	} else {
+		if (set_spm_enable)
+			ret = msm_spm_drv_set_spm_enable(&dev->reg_data, true);
 		for (i = 0; i < dev->num_modes; i++) {
 			if (dev->modes[i].mode != mode)
 				continue;
@@ -539,10 +541,24 @@
 {
 	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
 
-	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
 }
 EXPORT_SYMBOL(msm_spm_set_low_power_mode);
 
+void msm_spm_set_rpm_hs(bool allow_rpm_hs)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+
+	dev->allow_rpm_hs = allow_rpm_hs;
+}
+EXPORT_SYMBOL(msm_spm_set_rpm_hs);
+
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, false);
+}
+
 /**
  * msm_spm_init(): Board initalization function
  * @data: platform specific SPM register configuration data
@@ -586,7 +602,7 @@
 int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
 		unsigned int mode, bool notify_rpm)
 {
-	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
 }
 #ifdef CONFIG_MSM_L2_SPM
 
diff --git a/drivers/soc/qcom/subsys-pil-bg.c b/drivers/soc/qcom/subsys-pil-bg.c
index 75c3666..070733e 100644
--- a/drivers/soc/qcom/subsys-pil-bg.c
+++ b/drivers/soc/qcom/subsys-pil-bg.c
@@ -30,6 +30,7 @@
 #include "peripheral-loader.h"
 #include "../../misc/qseecom_kernel.h"
 #include "pil_bg_intf.h"
+#include "bgcom_interface.h"
 
 #define INVALID_GPIO	-1
 #define NUM_GPIOS	4
@@ -37,7 +38,7 @@
 #define desc_to_data(d)	container_of(d, struct pil_bg_data, desc)
 #define subsys_to_data(d) container_of(d, struct pil_bg_data, subsys_desc)
 #define BG_RAMDUMP_SZ	0x00102000
-#define BG_CRASH_IN_TWM	2
+#define BG_CRASH_IN_TWM	-2
 /**
  * struct pil_bg_data
  * @qseecom_handle: handle of TZ app
@@ -90,9 +91,18 @@
 static void bg_app_shutdown_notify(const struct subsys_desc *subsys)
 {
 	struct pil_bg_data *bg_data = subsys_to_data(subsys);
+
+	/* Disable IRQs if BG is already up */
+	if (bg_data->is_ready) {
+		disable_irq(bg_data->status_irq);
+		disable_irq(bg_data->errfatal_irq);
+		bg_data->is_ready = false;
+	}
 	/* Toggle AP2BG err fatal gpio here to inform apps err fatal event */
-	if (gpio_is_valid(bg_data->gpios[2]))
+	if (gpio_is_valid(bg_data->gpios[2])) {
+		pr_debug("Sending Apps shutdown signal\n");
 		gpio_set_value(bg_data->gpios[2], 1);
+	}
 }
 
 /**
@@ -106,9 +116,18 @@
 {
 	struct pil_bg_data *bg_data = container_of(nb,
 					struct pil_bg_data, reboot_blk);
+
+	/* Disable IRQs if BG is already up */
+	if (bg_data->is_ready) {
+		disable_irq(bg_data->status_irq);
+		disable_irq(bg_data->errfatal_irq);
+		bg_data->is_ready = false;
+	}
 	/* Toggle AP2BG err fatal gpio here to inform apps err fatal event */
-	if (gpio_is_valid(bg_data->gpios[2]))
+	if (gpio_is_valid(bg_data->gpios[2])) {
+		pr_debug("Sending reboot signal\n");
 		gpio_set_value(bg_data->gpios[2], 1);
+	}
 	return NOTIFY_DONE;
 }
 
@@ -266,7 +285,6 @@
 		return ret;
 	}
 	enable_irq(bg_data->status_irq);
-	enable_irq(bg_data->errfatal_irq);
 	ret = wait_for_err_ready(bg_data);
 	if (ret) {
 		dev_err(bg_data->desc.dev,
@@ -289,10 +307,12 @@
 {
 	struct pil_bg_data *bg_data = subsys_to_data(subsys);
 
-	disable_irq(bg_data->status_irq);
-	devm_free_irq(bg_data->desc.dev, bg_data->status_irq, bg_data);
-	disable_irq(bg_data->errfatal_irq);
-	bg_data->is_ready = false;
+	if (bg_data->is_ready) {
+		disable_irq(bg_data->status_irq);
+		devm_free_irq(bg_data->desc.dev, bg_data->status_irq, bg_data);
+		disable_irq(bg_data->errfatal_irq);
+		bg_data->is_ready = false;
+	}
 	return 0;
 }
 
@@ -393,7 +413,9 @@
 	ret = bgpil_tzapp_comm(bg_data, &bg_tz_req);
 	if (bg_data->cmd_status == BG_CRASH_IN_TWM) {
 		/* Do ramdump and resend boot cmd */
-		bg_data->subsys_desc.ramdump(true, &bg_data->subsys_desc);
+		if (is_twm_exit())
+			bg_data->subsys_desc.ramdump(true,
+				&bg_data->subsys_desc);
 		bg_tz_req.tzapp_bg_cmd = BGPIL_DLOAD_CONT;
 		ret = bgpil_tzapp_comm(bg_data, &bg_tz_req);
 	}
@@ -524,7 +546,6 @@
 	} else if (value == false && drvdata->is_ready) {
 		dev_err(drvdata->desc.dev,
 			"BG got unexpected reset: irq state changed 1->0\n");
-			drvdata->is_ready = false;
 		queue_work(drvdata->bg_queue, &drvdata->restart_work);
 	} else {
 		dev_err(drvdata->desc.dev,
@@ -586,7 +607,6 @@
 		goto err;
 	}
 	drvdata->errfatal_irq = irq;
-	enable_irq(drvdata->errfatal_irq);
 	/* Configure outgoing GPIO's */
 	if (gpio_request(drvdata->gpios[2], "AP2BG_ERRFATAL")) {
 		dev_err(&pdev->dev,
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 9c6edbf..1befdcf 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -22,6 +22,13 @@
 #define PDC_TIME_VALID_SHIFT	31
 #define PDC_TIME_UPPER_MASK	0xFFFFFF
 
+#ifdef CONFIG_ARM_GIC_V3
+#include <linux/irqchip/arm-gic-v3.h>
+#else
+static inline void gic_v3_dist_restore(void) {}
+static inline void gic_v3_dist_save(void) {}
+#endif
+
 static struct rpmh_client *rpmh_client;
 
 static int setup_wakeup(uint32_t lo, uint32_t hi)
@@ -61,6 +68,7 @@
  */
 static int system_sleep_enter(struct cpumask *mask)
 {
+	gic_v3_dist_save();
 	return rpmh_flush(rpmh_client);
 }
 
@@ -70,6 +78,7 @@
 static void system_sleep_exit(void)
 {
 	msm_rpmh_master_stats_update();
+	gic_v3_dist_restore();
 }
 
 static struct system_pm_ops pm_ops = {
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 520aedd..78d3dba 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -247,7 +247,7 @@
 		/* Increment for next fragment */
 		req->seq++;
 
-		data += req->hdr.len;
+		data += NV_FRAGMENT_SIZE;
 		left -= NV_FRAGMENT_SIZE;
 	} while (left > 0);
 
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 7d629b4..6323176 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -514,7 +514,7 @@
 
 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 1;
@@ -529,7 +529,7 @@
 
 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 0;
@@ -543,16 +543,19 @@
 
 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 {
-	u32 data = 0;
+	u32 rd = 0;
+	u32 wr = 0;
 
-	if (qspi->curr_cs == cs)
-		return;
 	if (qspi->base[CHIP_SELECT]) {
-		data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
-		data = (data & ~0xff) | (1 << cs);
-		bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+		wr = (rd & ~0xff) | (1 << cs);
+		if (rd == wr)
+			return;
+		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
 		usleep_range(10, 20);
 	}
+
+	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
 	qspi->curr_cs = cs;
 }
 
@@ -770,8 +773,13 @@
 			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
 		}
 		mspi_cdram = MSPI_CDRAM_CONT_BIT;
-		mspi_cdram |= (~(1 << spi->chip_select) &
-			       MSPI_CDRAM_PCS);
+
+		if (has_bspi(qspi))
+			mspi_cdram &= ~1;
+		else
+			mspi_cdram |= (~(1 << spi->chip_select) &
+				       MSPI_CDRAM_PCS);
+
 		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
 				MSPI_CDRAM_BITSE_BIT);
 
@@ -1212,7 +1220,7 @@
 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[MSPI])) {
 			ret = PTR_ERR(qspi->base[MSPI]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 	} else {
 		goto qspi_resource_err;
@@ -1223,7 +1231,7 @@
 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[BSPI])) {
 			ret = PTR_ERR(qspi->base[BSPI]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 		qspi->bspi_mode = true;
 	} else {
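
Two things change in the bcm-qspi chip-select path above: the early-exit test now compares the value read back from the CHIP_SELECT register against the value about to be written, instead of trusting the cached curr_cs, and the BSPI enable/disable helpers no longer short-circuit on the bspi_enabled flag. A rough sketch of the register read-modify-write with the new early exit (reg_read/reg_write and CHIP_SELECT_REG are illustrative stand-ins):

extern unsigned int reg_read(unsigned int reg);
extern void reg_write(unsigned int reg, unsigned int val);
#define CHIP_SELECT_REG 0x0u	/* placeholder register offset */

static void select_chip(unsigned int cs)
{
	unsigned int rd = reg_read(CHIP_SELECT_REG);
	unsigned int wr = (rd & ~0xffu) | (1u << cs);

	if (rd == wr)
		return;		/* hardware already selects this chip, skip the write */

	reg_write(CHIP_SELECT_REG, wr);
	/* the driver then waits 10-20 us for the new selection to settle */
}
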
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index dfa387c..1663d3b 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -89,7 +89,7 @@
 #define TIMESTAMP_AFTER		BIT(3)
 #define POST_CMD_DELAY		BIT(4)
 
-#define SPI_CORE2X_VOTE		(10000)
+#define SPI_CORE2X_VOTE		(7600)
 /* GSI CONFIG0 TRE Params */
 /* Flags bit fields */
 #define GSI_LOOPBACK_EN		(BIT(0))
@@ -155,6 +155,7 @@
 	int num_xfers;
 	void *ipc;
 	bool shared_se;
+	bool dis_autosuspend;
 };
 
 static struct spi_master *get_spi_master(struct device *dev)
@@ -311,7 +312,7 @@
 				struct spi_message *spi_msg)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
-	int mode = FIFO_MODE;
+	int mode = SE_DMA;
 	int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
 							FIFO_IF_DISABLE);
 	bool dma_chan_valid =
@@ -325,10 +326,10 @@
 	 */
 	if (fifo_disable && !dma_chan_valid)
 		mode = -EINVAL;
+	else if (!fifo_disable)
+		mode = SE_DMA;
 	else if (dma_chan_valid)
 		mode = GSI_DMA;
-	else
-		mode = FIFO_MODE;
 	return mode;
 }
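
After this hunk the selection collapses to three outcomes: an error when the FIFO interface is fused off and no GSI channel exists, GSI_DMA when the FIFO interface is off but a channel is present, and SE_DMA otherwise (a later per-transfer hunk still drops back to FIFO_MODE for short transfers). A condensed sketch of that decision, with illustrative values standing in for the driver's mode constants:

#include <errno.h>

enum { SE_DMA = 1, GSI_DMA = 2 };	/* stand-ins for the driver's mode identifiers */

static int pick_mode(int fifo_disable, int dma_chan_valid)
{
	if (fifo_disable && !dma_chan_valid)
		return -EINVAL;		/* neither the FIFO nor the GSI path is usable */
	if (fifo_disable)
		return GSI_DMA;		/* FIFO interface disabled: only the GSI channel works */
	return SE_DMA;			/* default; short transfers may still use the FIFO */
}
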
 
@@ -715,25 +716,20 @@
 
 	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
 
-	if (mas->cur_xfer_mode == FIFO_MODE) {
-		geni_se_select_mode(mas->base, FIFO_MODE);
-		reinit_completion(&mas->xfer_done);
-		ret = setup_fifo_params(spi_msg->spi, spi);
+	if (mas->cur_xfer_mode < 0) {
+		dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
+							mas->cur_xfer_mode);
+		ret = -EINVAL;
 	} else if (mas->cur_xfer_mode == GSI_DMA) {
-		mas->num_tx_eot = 0;
-		mas->num_rx_eot = 0;
-		mas->num_xfers = 0;
-		reinit_completion(&mas->tx_cb);
-		reinit_completion(&mas->rx_cb);
 		memset(mas->gsi, 0,
 				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
 		geni_se_select_mode(mas->base, GSI_DMA);
 		ret = spi_geni_map_buf(mas, spi_msg);
 	} else {
-		dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
-							mas->cur_xfer_mode);
-		ret = -EINVAL;
+		geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		ret = setup_fifo_params(spi_msg->spi, spi);
 	}
+
 	return ret;
 }
 
@@ -752,13 +748,12 @@
 static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
-	int ret = 0;
+	int ret = 0, count = 0;
 	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
 	struct se_geni_rsc *rsc = &mas->spi_rsc;
 
-	/* Adjust the AB/IB based on the max speed of the slave.*/
+	/* Adjust the IB based on the max speed of the slave.*/
 	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
-	rsc->ab = max_speed * DEFAULT_BUS_WIDTH;
 	if (mas->shared_se) {
 		struct se_geni_rsc *rsc;
 		int ret = 0;
@@ -780,7 +775,12 @@
 	} else {
 		ret = 0;
 	}
-
+	if (mas->dis_autosuspend) {
+		count = atomic_read(&mas->dev->power.usage_count);
+		if (count <= 0)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+				"resume usage count mismatch:%d", count);
+	}
 	if (unlikely(!mas->setup)) {
 		int proto = get_se_proto(mas->base);
 		unsigned int major;
@@ -869,6 +869,9 @@
 		mas->shared_se =
 			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
 							FIFO_IF_DISABLE);
+		if (mas->dis_autosuspend)
+			GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"Auto Suspend is disabled\n");
 	}
 exit_prepare_transfer_hardware:
 	return ret;
@@ -877,7 +880,7 @@
 static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
-
+	int count = 0;
 	if (mas->shared_se) {
 		struct se_geni_rsc *rsc;
 		int ret = 0;
@@ -890,8 +893,16 @@
 			"%s: Error %d pinctrl_select_state\n", __func__, ret);
 	}
 
-	pm_runtime_mark_last_busy(mas->dev);
-	pm_runtime_put_autosuspend(mas->dev);
+	if (mas->dis_autosuspend) {
+		pm_runtime_put_sync(mas->dev);
+		count = atomic_read(&mas->dev->power.usage_count);
+		if (count < 0)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+				"suspend usage count mismatch:%d", count);
+	} else {
+		pm_runtime_mark_last_busy(mas->dev);
+		pm_runtime_put_autosuspend(mas->dev);
+	}
 	return 0;
 }
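
With autosuspend disabled, the unprepare path above drops its runtime-PM reference synchronously and sanity-checks the usage count; otherwise it keeps the existing deferred autosuspend put. A hedged sketch of the two paths using the standard runtime-PM helpers (the WARN_ON stands in for the driver's IPC error log):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void finish_transfer(struct device *dev, bool autosuspend_disabled)
{
	if (autosuspend_disabled) {
		pm_runtime_put_sync(dev);		/* drop the reference and suspend now */
		WARN_ON(atomic_read(&dev->power.usage_count) < 0);
	} else {
		pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);	/* suspend later, after the delay */
	}
}
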
 
@@ -965,26 +976,64 @@
 		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
 		mas->rx_rem_bytes = xfer->len;
 	}
+
+	if (trans_len > (mas->tx_fifo_depth * mas->tx_fifo_width)) {
+		if (mas->cur_xfer_mode != SE_DMA) {
+			mas->cur_xfer_mode = SE_DMA;
+			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		}
+	} else {
+		if (mas->cur_xfer_mode != FIFO_MODE) {
+			mas->cur_xfer_mode = FIFO_MODE;
+			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		}
+	}
+
 	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
 	geni_setup_m_cmd(mas->base, m_cmd, m_param);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
-		"%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs %d\n",
+		"%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d\n",
 		__func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
-		xfer->cs_change);
-	if (m_cmd & SPI_TX_ONLY)
-		geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG);
+		xfer->cs_change, mas->cur_xfer_mode);
+	if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) {
+		int ret = 0;
+
+		ret =  geni_se_rx_dma_prep(mas->wrapper_dev, mas->base,
+				xfer->rx_buf, xfer->len, &xfer->rx_dma);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"Failed to setup Rx dma %d\n", ret);
+	}
+	if (m_cmd & SPI_TX_ONLY) {
+		if (mas->cur_xfer_mode == FIFO_MODE) {
+			geni_write_reg(mas->tx_wm, mas->base,
+					SE_GENI_TX_WATERMARK_REG);
+		} else if (mas->cur_xfer_mode == SE_DMA) {
+			int ret = 0;
+
+			ret =  geni_se_tx_dma_prep(mas->wrapper_dev, mas->base,
+					(void *)xfer->tx_buf, xfer->len,
+							&xfer->tx_dma);
+			if (ret)
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+					"Failed to setup tx dma %d\n", ret);
+		}
+	}
+
 	/* Ensure all writes are done before the WM interrupt */
 	mb();
 }
 
-static void handle_fifo_timeout(struct spi_geni_master *mas)
+static void handle_fifo_timeout(struct spi_geni_master *mas,
+					struct spi_transfer *xfer)
 {
 	unsigned long timeout;
 
 	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
 	reinit_completion(&mas->xfer_done);
 	geni_cancel_m_cmd(mas->base);
-	geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
+	if (mas->cur_xfer_mode == FIFO_MODE)
+		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
 	/* Ensure cmd cancel is written */
 	mb();
 	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
@@ -999,6 +1048,15 @@
 			dev_err(mas->dev,
 				"Failed to cancel/abort m_cmd\n");
 	}
+	if (mas->cur_xfer_mode == SE_DMA) {
+		if (xfer->tx_buf)
+			geni_se_tx_dma_unprep(mas->wrapper_dev,
+					xfer->tx_dma, xfer->len);
+		if (xfer->rx_buf)
+			geni_se_rx_dma_unprep(mas->wrapper_dev,
+					xfer->rx_dma, xfer->len);
+	}
+
 }
 
 static int spi_geni_transfer_one(struct spi_master *spi,
@@ -1014,7 +1072,8 @@
 		return -EINVAL;
 	}
 
-	if (mas->cur_xfer_mode == FIFO_MODE) {
+	if (mas->cur_xfer_mode != GSI_DMA) {
+		reinit_completion(&mas->xfer_done);
 		setup_fifo_xfer(xfer, mas, slv->mode, spi);
 		timeout = wait_for_completion_timeout(&mas->xfer_done,
 					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
@@ -1028,7 +1087,22 @@
 			ret = -ETIMEDOUT;
 			goto err_fifo_geni_transfer_one;
 		}
+
+		if (mas->cur_xfer_mode == SE_DMA) {
+			if (xfer->tx_buf)
+				geni_se_tx_dma_unprep(mas->wrapper_dev,
+					xfer->tx_dma, xfer->len);
+			if (xfer->rx_buf)
+				geni_se_rx_dma_unprep(mas->wrapper_dev,
+					xfer->rx_dma, xfer->len);
+		}
 	} else {
+		mas->num_tx_eot = 0;
+		mas->num_rx_eot = 0;
+		mas->num_xfers = 0;
+		reinit_completion(&mas->tx_cb);
+		reinit_completion(&mas->rx_cb);
+
 		setup_gsi_xfer(xfer, mas, slv, spi);
 		if ((mas->num_xfers >= NUM_SPI_XFER) ||
 			(list_is_last(&xfer->transfer_list,
@@ -1072,7 +1146,7 @@
 	dmaengine_terminate_all(mas->tx);
 	return ret;
 err_fifo_geni_transfer_one:
-	handle_fifo_timeout(mas);
+	handle_fifo_timeout(mas, xfer);
 	return ret;
 }
 
@@ -1188,33 +1262,59 @@
 		goto exit_geni_spi_irq;
 	}
 	m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
-	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
-		geni_spi_handle_rx(mas);
+	if (mas->cur_xfer_mode == FIFO_MODE) {
+		if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
+						(m_irq & M_RX_FIFO_LAST_EN))
+			geni_spi_handle_rx(mas);
 
-	if ((m_irq & M_TX_FIFO_WATERMARK_EN))
-		geni_spi_handle_tx(mas);
+		if ((m_irq & M_TX_FIFO_WATERMARK_EN))
+			geni_spi_handle_tx(mas);
 
-	if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
-		(m_irq & M_CMD_ABORT_EN)) {
-		complete(&mas->xfer_done);
-		/*
-		 * If this happens, then a CMD_DONE came before all the buffer
-		 * bytes were sent out. This is unusual, log this condition and
-		 * disable the WM interrupt to prevent the system from stalling
-		 * due an interrupt storm.
-		 * If this happens when all Rx bytes haven't been received, log
-		 * the condition.
-		 */
-		if (mas->tx_rem_bytes) {
-			geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
-			GENI_SE_DBG(mas->ipc, false, mas->dev,
-				"%s:Premature Done.tx_rem%d bpw%d\n",
-				__func__, mas->tx_rem_bytes, mas->cur_word_len);
+		if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
+			(m_irq & M_CMD_ABORT_EN)) {
+			complete(&mas->xfer_done);
+			/*
+			 * If this happens, then a CMD_DONE came before all the
+			 * buffer bytes were sent out. This is unusual, log this
+			 * condition and disable the WM interrupt to prevent the
+			 * system from stalling due to an interrupt storm.
+			 * If this happens when all Rx bytes haven't been
+			 * received, log the condition.
+			 */
+			if (mas->tx_rem_bytes) {
+				geni_write_reg(0, mas->base,
+						SE_GENI_TX_WATERMARK_REG);
+				GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"%s:Premature Done.tx_rem%d bpw%d\n",
+					__func__, mas->tx_rem_bytes,
+						mas->cur_word_len);
+			}
+			if (mas->rx_rem_bytes)
+				GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"%s:Premature Done.rx_rem%d bpw%d\n",
+						__func__, mas->rx_rem_bytes,
+							mas->cur_word_len);
 		}
-		if (mas->rx_rem_bytes)
-			GENI_SE_DBG(mas->ipc, false, mas->dev,
-				"%s:Premature Done.rx_rem%d bpw%d\n",
-				__func__, mas->rx_rem_bytes, mas->cur_word_len);
+	} else if (mas->cur_xfer_mode == SE_DMA) {
+		u32 dma_tx_status = geni_read_reg(mas->base,
+							SE_DMA_TX_IRQ_STAT);
+		u32 dma_rx_status = geni_read_reg(mas->base,
+							SE_DMA_RX_IRQ_STAT);
+
+		if (dma_tx_status)
+			geni_write_reg(dma_tx_status, mas->base,
+						SE_DMA_TX_IRQ_CLR);
+		if (dma_rx_status)
+			geni_write_reg(dma_rx_status, mas->base,
+						SE_DMA_RX_IRQ_CLR);
+		if (dma_tx_status & TX_DMA_DONE)
+			mas->tx_rem_bytes = 0;
+		if (dma_rx_status & RX_DMA_DONE)
+			mas->rx_rem_bytes = 0;
+		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes)
+			complete(&mas->xfer_done);
+		if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN))
+			complete(&mas->xfer_done);
 	}
 exit_geni_spi_irq:
 	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
@@ -1328,7 +1428,9 @@
 	rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
 	if (rt_pri)
 		spi->rt = true;
-
+	geni_mas->dis_autosuspend =
+		of_property_read_bool(pdev->dev.of_node,
+				"qcom,disable-autosuspend");
 	geni_mas->phys_addr = res->start;
 	geni_mas->size = resource_size(res);
 	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
@@ -1368,8 +1470,11 @@
 	init_completion(&geni_mas->tx_cb);
 	init_completion(&geni_mas->rx_cb);
 	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTO_SUSPEND_DELAY);
-	pm_runtime_use_autosuspend(&pdev->dev);
+	if (!geni_mas->dis_autosuspend) {
+		pm_runtime_set_autosuspend_delay(&pdev->dev,
+					SPI_AUTO_SUSPEND_DELAY);
+		pm_runtime_use_autosuspend(&pdev->dev);
+	}
 	pm_runtime_enable(&pdev->dev);
 	ret = spi_register_master(spi);
 	if (ret) {
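
At probe time, the new "qcom,disable-autosuspend" boolean simply switches between the two runtime-PM setups shown above. A minimal sketch of how such a flag is read and applied (the 250 ms delay is illustrative; the driver uses its own SPI_AUTO_SUSPEND_DELAY):

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void example_setup_runtime_pm(struct platform_device *pdev, bool *dis_autosuspend)
{
	/* present-or-absent boolean property, e.g. "qcom,disable-autosuspend;" in the node */
	*dis_autosuspend = of_property_read_bool(pdev->dev.of_node,
						 "qcom,disable-autosuspend");
	pm_runtime_set_suspended(&pdev->dev);
	if (!*dis_autosuspend) {
		pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
		pm_runtime_use_autosuspend(&pdev->dev);
	}
	pm_runtime_enable(&pdev->dev);
}
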
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index ce31b81..b8e004d 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@
 
 	/* SSP register addresses */
 	void __iomem *ioaddr;
-	u32 ssdr_physical;
+	phys_addr_t ssdr_physical;
 
 	/* SSP masks*/
 	u32 dma_cr1;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 6199523..0f0b7ba 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1449,6 +1449,7 @@
 	.driver		= {
 		.name	= "spmi_pmic_arb",
 		.of_match_table = spmi_pmic_arb_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 };
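
The spmi-pmic-arb change above is a single driver-core flag: PROBE_PREFER_ASYNCHRONOUS (from <linux/device.h>) lets the core probe the device on a worker thread so it does not serialize boot. A minimal sketch of the same opt-in on a hypothetical platform driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* real hardware setup would go here */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "example-async",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(example_driver);
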
 
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index dd64148..edfb680 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -39,7 +39,6 @@
    waiting threads. We should eventually use multiple queues and select the
    queue based on the region.
  - Add debugfs support for examining the permissions of regions.
- - Use ioremap_wc instead of ioremap_nocache.
  - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
    superseded by the futex and is there for legacy reasons.
 
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index c1103c7..6000707 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1003,7 +1003,6 @@
 	struct ion_device *dev = client->dev;
 	struct rb_node *n;
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
 	down_write(&dev->lock);
 	rb_erase(&client->node, &dev->clients);
 	up_write(&dev->lock);
@@ -1213,9 +1212,6 @@
 	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 	int i;
 
-	pr_debug("%s: syncing for device %s\n", __func__,
-		 dev ? dev_name(dev) : "null");
-
 	if (!ion_buffer_fault_user_mappings(buffer))
 		return;
 
@@ -1269,7 +1265,6 @@
 	mutex_lock(&buffer->lock);
 	list_add(&vma_list->list, &buffer->vmas);
 	mutex_unlock(&buffer->lock);
-	pr_debug("%s: adding %pK\n", __func__, vma);
 }
 
 static void ion_vm_close(struct vm_area_struct *vma)
@@ -1277,14 +1272,12 @@
 	struct ion_buffer *buffer = vma->vm_private_data;
 	struct ion_vma_list *vma_list, *tmp;
 
-	pr_debug("%s\n", __func__);
 	mutex_lock(&buffer->lock);
 	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
 		if (vma_list->vma != vma)
 			continue;
 		list_del(&vma_list->list);
 		kfree(vma_list);
-		pr_debug("%s: deleting %pK\n", __func__, vma);
 		break;
 	}
 	mutex_unlock(&buffer->lock);
@@ -1680,7 +1673,6 @@
 {
 	struct ion_client *client = file->private_data;
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
 	ion_client_destroy(client);
 	return 0;
 }
@@ -1692,7 +1684,6 @@
 	struct ion_client *client;
 	char debug_name[64];
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
 	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
 	client = ion_client_create(dev, debug_name);
 	if (IS_ERR(client))
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 72f2b6a..d991b02 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -81,8 +81,6 @@
 	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info;
 
-	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
 	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
 	if (!info)
 		return ION_CMA_ALLOCATE_FAILED;
@@ -123,7 +121,6 @@
 
 	/* keep this for memory release */
 	buffer->priv_virt = info;
-	dev_dbg(dev, "Allocate buffer %pK\n", buffer);
 	return 0;
 
 err:
@@ -137,7 +134,6 @@
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 	unsigned long attrs = 0;
 
-	dev_dbg(dev, "Release buffer %pK\n", buffer);
 	/* release memory */
 	if (info->is_cached)
 		attrs |= DMA_ATTR_FORCE_COHERENT;
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 3ac71ed..3b27252 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -756,8 +756,10 @@
 {
 	int i;
 	for (i = 0; i < num_orders; i++)
-		if (pools[i])
+		if (pools[i]) {
 			ion_page_pool_destroy(pools[i]);
+			pools[i] = NULL;
+		}
 }
 
 /**
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index a1602e4..027094f 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -63,6 +63,10 @@
 #define CREATE_TRACE_POINTS
 #include "trace/lowmemorykiller.h"
 
+/* to enable lowmemorykiller */
+static int enable_lmk = 1;
+module_param_named(enable_lmk, enable_lmk, int, 0644);
+
 static u32 lowmem_debug_level = 1;
 static short lowmem_adj[6] = {
 	0,
@@ -93,6 +97,9 @@
 static unsigned long lowmem_count(struct shrinker *s,
 				  struct shrink_control *sc)
 {
+	if (!enable_lmk)
+		return 0;
+
 	return global_node_page_state(NR_ACTIVE_ANON) +
 		global_node_page_state(NR_ACTIVE_FILE) +
 		global_node_page_state(NR_INACTIVE_ANON) +
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 587c66d7..954ed2c 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -81,8 +81,8 @@
 	atomic_t *incoming_signalled;
 	/* Flag indicating the guest has signalled the host. */
 	atomic_t *outgoing_signalled;
-	int irq_requested;
-	int device_created;
+	bool irq_requested;
+	bool device_created;
 };
 
 struct vsoc_device {
@@ -91,7 +91,7 @@
 	/* Physical address of SHARED_MEMORY_BAR. */
 	phys_addr_t shm_phys_start;
 	/* Kernel virtual address of SHARED_MEMORY_BAR. */
-	void *kernel_mapped_shm;
+	void __iomem *kernel_mapped_shm;
 	/* Size of the entire shared memory window in bytes. */
 	size_t shm_size;
 	/*
@@ -116,22 +116,23 @@
 	 * vsoc_region_data because the kernel deals with them as an array.
 	 */
 	struct msix_entry *msix_entries;
-	/*
-	 * Flags that indicate what we've initialzied. These are used to do an
-	 * orderly cleanup of the device.
-	 */
-	char enabled_device;
-	char requested_regions;
-	char cdev_added;
-	char class_added;
-	char msix_enabled;
 	/* Mutex that protects the permission list */
 	struct mutex mtx;
 	/* Major number assigned by the kernel */
 	int major;
-
+	/* Character device assigned by the kernel */
 	struct cdev cdev;
+	/* Device class assigned by the kernel */
 	struct class *class;
+	/*
+	 * Flags that indicate what we've initialized. These are used to do an
+	 * orderly cleanup of the device.
+	 */
+	bool enabled_device;
+	bool requested_regions;
+	bool cdev_added;
+	bool class_added;
+	bool msix_enabled;
 };
 
 static struct vsoc_device vsoc_dev;
@@ -153,13 +154,13 @@
 static int vsoc_mmap(struct file *, struct vm_area_struct *);
 static int vsoc_open(struct inode *, struct file *);
 static int vsoc_release(struct inode *, struct file *);
-static ssize_t vsoc_read(struct file *, char *, size_t, loff_t *);
-static ssize_t vsoc_write(struct file *, const char *, size_t, loff_t *);
+static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
 static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
 static int do_create_fd_scoped_permission(
 	struct vsoc_device_region *region_p,
 	struct fd_scoped_permission_node *np,
-	struct fd_scoped_permission_arg *__user arg);
+	struct fd_scoped_permission_arg __user *arg);
 static void do_destroy_fd_scoped_permission(
 	struct vsoc_device_region *owner_region_p,
 	struct fd_scoped_permission *perm);
@@ -198,7 +199,7 @@
 /* Converts from shared memory offset to virtual address */
 static inline void *shm_off_to_virtual_addr(__u32 offset)
 {
-	return vsoc_dev.kernel_mapped_shm + offset;
+	return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
 }
 
 /* Converts from shared memory offset to physical address */
@@ -261,7 +262,7 @@
 static int do_create_fd_scoped_permission(
 	struct vsoc_device_region *region_p,
 	struct fd_scoped_permission_node *np,
-	struct fd_scoped_permission_arg *__user arg)
+	struct fd_scoped_permission_arg __user *arg)
 {
 	struct file *managed_filp;
 	s32 managed_fd;
@@ -632,11 +633,11 @@
 	return 0;
 }
 
-static ssize_t vsoc_read(struct file *filp, char *buffer, size_t len,
+static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
 			 loff_t *poffset)
 {
 	__u32 area_off;
-	void *area_p;
+	const void *area_p;
 	ssize_t area_len;
 	int retval = vsoc_validate_filep(filp);
 
@@ -706,7 +707,7 @@
 	return offset;
 }
 
-static ssize_t vsoc_write(struct file *filp, const char *buffer,
+static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
 			  size_t len, loff_t *poffset)
 {
 	__u32 area_off;
@@ -772,14 +773,14 @@
 			pci_name(pdev), result);
 		return result;
 	}
-	vsoc_dev.enabled_device = 1;
+	vsoc_dev.enabled_device = true;
 	result = pci_request_regions(pdev, "vsoc");
 	if (result < 0) {
 		dev_err(&pdev->dev, "pci_request_regions failed\n");
 		vsoc_remove_device(pdev);
 		return -EBUSY;
 	}
-	vsoc_dev.requested_regions = 1;
+	vsoc_dev.requested_regions = true;
 	/* Set up the control registers in BAR 0 */
 	reg_size = pci_resource_len(pdev, REGISTER_BAR);
 	if (reg_size > MAX_REGISTER_BAR_LEN)
@@ -790,7 +791,7 @@
 
 	if (!vsoc_dev.regs) {
 		dev_err(&pdev->dev,
-			"cannot ioremap registers of size %zu\n",
+			"cannot map registers of size %zu\n",
 		       (size_t)reg_size);
 		vsoc_remove_device(pdev);
 		return -EBUSY;
@@ -800,19 +801,17 @@
 	vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
 	vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
 
-	dev_info(&pdev->dev, "shared memory @ DMA %p size=0x%zx\n",
-		 (void *)vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
-	/* TODO(ghartman): ioremap_wc should work here */
-	vsoc_dev.kernel_mapped_shm = ioremap_nocache(
-			vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+	dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
+		 &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+	vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
 	if (!vsoc_dev.kernel_mapped_shm) {
 		dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
 		vsoc_remove_device(pdev);
 		return -EBUSY;
 	}
 
-	vsoc_dev.layout =
-	    (struct vsoc_shm_layout_descriptor *)vsoc_dev.kernel_mapped_shm;
+	vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
+				vsoc_dev.kernel_mapped_shm;
 	dev_info(&pdev->dev, "major_version: %d\n",
 		 vsoc_dev.layout->major_version);
 	dev_info(&pdev->dev, "minor_version: %d\n",
@@ -843,16 +842,16 @@
 		vsoc_remove_device(pdev);
 		return -EBUSY;
 	}
-	vsoc_dev.cdev_added = 1;
+	vsoc_dev.cdev_added = true;
 	vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
 	if (IS_ERR(vsoc_dev.class)) {
 		dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
 		vsoc_remove_device(pdev);
 		return PTR_ERR(vsoc_dev.class);
 	}
-	vsoc_dev.class_added = 1;
-	vsoc_dev.regions = (struct vsoc_device_region *)
-		(vsoc_dev.kernel_mapped_shm +
+	vsoc_dev.class_added = true;
+	vsoc_dev.regions = (struct vsoc_device_region __force *)
+		((void *)vsoc_dev.layout +
 		 vsoc_dev.layout->vsoc_region_desc_offset);
 	vsoc_dev.msix_entries = kcalloc(
 			vsoc_dev.layout->region_count,
@@ -912,7 +911,7 @@
 			return -EFAULT;
 		}
 	}
-	vsoc_dev.msix_enabled = 1;
+	vsoc_dev.msix_enabled = true;
 	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
 		const struct vsoc_device_region *region = vsoc_dev.regions + i;
 		size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
@@ -930,14 +929,11 @@
 				&vsoc_dev.regions_data[i].interrupt_wait_queue);
 		init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
 		vsoc_dev.regions_data[i].incoming_signalled =
-			vsoc_dev.kernel_mapped_shm +
-			region->region_begin_offset +
+			shm_off_to_virtual_addr(region->region_begin_offset) +
 			h_to_g_signal_table->interrupt_signalled_offset;
 		vsoc_dev.regions_data[i].outgoing_signalled =
-			vsoc_dev.kernel_mapped_shm +
-			region->region_begin_offset +
+			shm_off_to_virtual_addr(region->region_begin_offset) +
 			g_to_h_signal_table->interrupt_signalled_offset;
-
 		result = request_irq(
 				vsoc_dev.msix_entries[i].vector,
 				vsoc_interrupt, 0,
@@ -950,7 +946,7 @@
 			vsoc_remove_device(pdev);
 			return -ENOSPC;
 		}
-		vsoc_dev.regions_data[i].irq_requested = 1;
+		vsoc_dev.regions_data[i].irq_requested = true;
 		if (!device_create(vsoc_dev.class, NULL,
 				   MKDEV(vsoc_dev.major, i),
 				   NULL, vsoc_dev.regions_data[i].name)) {
@@ -958,7 +954,7 @@
 			vsoc_remove_device(pdev);
 			return -EBUSY;
 		}
-		vsoc_dev.regions_data[i].device_created = 1;
+		vsoc_dev.regions_data[i].device_created = true;
 	}
 	return 0;
 }
@@ -990,51 +986,51 @@
 			if (vsoc_dev.regions_data[i].device_created) {
 				device_destroy(vsoc_dev.class,
 					       MKDEV(vsoc_dev.major, i));
-				vsoc_dev.regions_data[i].device_created = 0;
+				vsoc_dev.regions_data[i].device_created = false;
 			}
 			if (vsoc_dev.regions_data[i].irq_requested)
 				free_irq(vsoc_dev.msix_entries[i].vector, NULL);
-			vsoc_dev.regions_data[i].irq_requested = 0;
+			vsoc_dev.regions_data[i].irq_requested = false;
 		}
 		kfree(vsoc_dev.regions_data);
-		vsoc_dev.regions_data = 0;
+		vsoc_dev.regions_data = NULL;
 	}
 	if (vsoc_dev.msix_enabled) {
 		pci_disable_msix(pdev);
-		vsoc_dev.msix_enabled = 0;
+		vsoc_dev.msix_enabled = false;
 	}
 	kfree(vsoc_dev.msix_entries);
-	vsoc_dev.msix_entries = 0;
-	vsoc_dev.regions = 0;
+	vsoc_dev.msix_entries = NULL;
+	vsoc_dev.regions = NULL;
 	if (vsoc_dev.class_added) {
 		class_destroy(vsoc_dev.class);
-		vsoc_dev.class_added = 0;
+		vsoc_dev.class_added = false;
 	}
 	if (vsoc_dev.cdev_added) {
 		cdev_del(&vsoc_dev.cdev);
-		vsoc_dev.cdev_added = 0;
+		vsoc_dev.cdev_added = false;
 	}
 	if (vsoc_dev.major && vsoc_dev.layout) {
 		unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
 					 vsoc_dev.layout->region_count);
 		vsoc_dev.major = 0;
 	}
-	vsoc_dev.layout = 0;
+	vsoc_dev.layout = NULL;
 	if (vsoc_dev.kernel_mapped_shm) {
 		pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
-		vsoc_dev.kernel_mapped_shm = 0;
+		vsoc_dev.kernel_mapped_shm = NULL;
 	}
 	if (vsoc_dev.regs) {
 		pci_iounmap(pdev, vsoc_dev.regs);
-		vsoc_dev.regs = 0;
+		vsoc_dev.regs = NULL;
 	}
 	if (vsoc_dev.requested_regions) {
 		pci_release_regions(pdev);
-		vsoc_dev.requested_regions = 0;
+		vsoc_dev.requested_regions = false;
 	}
 	if (vsoc_dev.enabled_device) {
 		pci_disable_device(pdev);
-		vsoc_dev.enabled_device = 0;
+		vsoc_dev.enabled_device = false;
 	}
 	/* Do this last: it indicates that the device is not initialized. */
 	vsoc_dev.dev = NULL;
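
Most of the vsoc changes above are sparse annotations: the PCI BAR mapping becomes an __iomem pointer, the read/write buffers become __user pointers, and the deliberate conversions are marked __force so sparse stays quiet. A hedged sketch of the discipline those annotations enforce (names and the bounce-buffer size are illustrative):

#include <linux/io.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static ssize_t example_read(void __iomem *bar, char __user *ubuf, size_t len)
{
	char bounce[64];

	if (len > sizeof(bounce))
		len = sizeof(bounce);
	memcpy_fromio(bounce, bar, len);	/* device memory -> kernel buffer */
	if (copy_to_user(ubuf, bounce, len))	/* kernel buffer -> userspace */
		return -EFAULT;
	return len;
}
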
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index eaeb3c5..cb95c3e 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -75,6 +75,8 @@
 
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f6fc4dd..722c33f 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -253,7 +253,7 @@
 	struct sptlrpc_flavor    cl_flvr_mgc;   /* fixed flavor of mgc->mgs */
 
 	/* the grant values are protected by loi_list_lock below */
-	unsigned long		 cl_dirty_pages;	/* all _dirty_ in pahges */
+	unsigned long		 cl_dirty_pages;	/* all _dirty_ in pages */
 	unsigned long		 cl_dirty_max_pages;	/* allowed w/o rpc */
 	unsigned long		 cl_dirty_transit;	/* dirty synchronous */
 	unsigned long		 cl_avail_grant;	/* bytes of credit for ost */
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index cd19ce8..9e63171 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2928,7 +2928,7 @@
 	if (lsm && !lmm) {
 		int i;
 
-		for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
+		for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
 			/*
 			 * For migrating inode, the master stripe and master
 			 * object will be the same, so do not need iput, see
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4bbe219..1a8c9f5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1542,7 +1542,7 @@
 	if (rc < 0)
 		return 0;
 
-	if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
+	if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
 	    atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
 		osc_consume_write_grant(cli, &oap->oap_brw_page);
 		if (transient) {
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 457eeb5..5fe9593 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1705,6 +1705,8 @@
 
 		priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
 		priv->oldaddr = kmalloc(16, GFP_KERNEL);
+		if (!priv->oldaddr)
+			return -ENOMEM;
 		oldaddr = priv->oldaddr;
 		align = ((long)oldaddr) & 3;
 		if (align) {
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 1259654..ae24a68 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -58,4 +58,4 @@
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
 obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
-obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o tsens1xxx.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o tsens1xxx.o tsens_calib.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 2c4a63a..02f93f4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -224,7 +224,7 @@
 static int cpufreq_cooling_pm_notify(struct notifier_block *nb,
 				unsigned long mode, void *_unused)
 {
-	struct cpufreq_cooling_device *cpufreq_dev;
+	struct cpufreq_cooling_device *cpufreq_dev, *next;
 	unsigned int cpu;
 
 	switch (mode) {
@@ -236,8 +236,8 @@
 	case PM_POST_HIBERNATION:
 	case PM_POST_RESTORE:
 	case PM_POST_SUSPEND:
-		mutex_lock(&cooling_list_lock);
-		list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+		list_for_each_entry_safe(cpufreq_dev, next, &cpufreq_dev_list,
+						node) {
 			mutex_lock(&core_isolate_lock);
 			if (cpufreq_dev->cpufreq_state ==
 				cpufreq_dev->max_level) {
@@ -259,7 +259,6 @@
 			}
 			mutex_unlock(&core_isolate_lock);
 		}
-		mutex_unlock(&cooling_list_lock);
 
 		atomic_set(&in_suspend, 0);
 		break;
diff --git a/drivers/thermal/gov_low_limits.c b/drivers/thermal/gov_low_limits.c
index 278869c..d02ea26 100644
--- a/drivers/thermal/gov_low_limits.c
+++ b/drivers/thermal/gov_low_limits.c
@@ -62,19 +62,30 @@
 		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
 					old_target, (int)instance->target);
 
-		if (old_target == instance->target)
+		if (instance->initialized && old_target == instance->target)
 			continue;
 
-		if (old_target == THERMAL_NO_TARGET &&
+		if (!instance->initialized) {
+			if (instance->target != THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				tz->passive += 1;
+			}
+		} else {
+			if (old_target == THERMAL_NO_TARGET &&
 				instance->target != THERMAL_NO_TARGET) {
-			trace_thermal_zone_trip(tz, trip, trip_type, true);
-			tz->passive += 1;
-		} else if (old_target != THERMAL_NO_TARGET &&
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				tz->passive += 1;
+			} else if (old_target != THERMAL_NO_TARGET &&
 				instance->target == THERMAL_NO_TARGET) {
-			trace_thermal_zone_trip(tz, trip, trip_type, false);
-			tz->passive -= 1;
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							false);
+				tz->passive -= 1;
+			}
 		}
 
+		instance->initialized = true;
 		instance->cdev->updated = false; /* cdev needs update */
 	}
 
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index c137d3d..6241ef6 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -90,6 +90,9 @@
 	{	.compatible = "qcom,msm8937-tsens",
 		.data = &data_tsens14xx,
 	},
+	{	.compatible = "qcom,msm8909-tsens",
+		.data = &data_tsens1xxx_8909,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, tsens_table);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index ad1186d..a45810b 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -185,6 +185,7 @@
  * @regulator: pointer to the TMU regulator structure.
  * @reg_conf: pointer to structure to register with core thermal.
  * @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
  * @tmu_initialize: SoC specific TMU initialization method
  * @tmu_control: SoC specific TMU control method
  * @tmu_read: SoC specific TMU temperature read method
@@ -205,6 +206,7 @@
 	struct regulator *regulator;
 	struct thermal_zone_device *tzd;
 	unsigned int ntrip;
+	bool enabled;
 
 	int (*tmu_initialize)(struct platform_device *pdev);
 	void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -398,6 +400,7 @@
 	mutex_lock(&data->lock);
 	clk_enable(data->clk);
 	data->tmu_control(pdev, on);
+	data->enabled = on;
 	clk_disable(data->clk);
 	mutex_unlock(&data->lock);
 }
@@ -889,19 +892,24 @@
 static int exynos_get_temp(void *p, int *temp)
 {
 	struct exynos_tmu_data *data = p;
+	int value, ret = 0;
 
-	if (!data || !data->tmu_read)
+	if (!data || !data->tmu_read || !data->enabled)
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
 	clk_enable(data->clk);
 
-	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+	value = data->tmu_read(data);
+	if (value < 0)
+		ret = value;
+	else
+		*temp = code_to_temp(data, value) * MCELSIUS;
 
 	clk_disable(data->clk);
 	mutex_unlock(&data->lock);
 
-	return 0;
+	return ret;
 }
 
 #ifdef CONFIG_THERMAL_EMULATION
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 4bbb47a..4d75f6b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -190,16 +190,26 @@
 		if (instance->initialized && old_target == instance->target)
 			continue;
 
-		/* Activate a passive thermal instance */
-		if (old_target == THERMAL_NO_TARGET &&
-			instance->target != THERMAL_NO_TARGET) {
-			update_passive_instance(tz, trip_type, 1);
-			trace_thermal_zone_trip(tz, trip, trip_type, true);
-		/* Deactivate a passive thermal instance */
-		} else if (old_target != THERMAL_NO_TARGET &&
-			instance->target == THERMAL_NO_TARGET) {
-			update_passive_instance(tz, trip_type, -1);
-			trace_thermal_zone_trip(tz, trip, trip_type, false);
+		if (!instance->initialized) {
+			if (instance->target != THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				update_passive_instance(tz, trip_type, 1);
+			}
+		} else {
+			/* Activate a passive thermal instance */
+			if (old_target == THERMAL_NO_TARGET &&
+				instance->target != THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							true);
+				update_passive_instance(tz, trip_type, 1);
+			/* Deactivate a passive thermal instance */
+			} else if (old_target != THERMAL_NO_TARGET &&
+				instance->target == THERMAL_NO_TARGET) {
+				trace_thermal_zone_trip(tz, trip, trip_type,
+							false);
+				update_passive_instance(tz, trip_type, -1);
+			}
 		}
 
 		instance->initialized = true;
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index d35b867..c8e6233 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -23,7 +23,8 @@
 
 #define DEBUG_SIZE				10
 #define TSENS_MAX_SENSORS			16
-#define TSENS_1x_MAX_SENSORS			11
+#define TSENS_NUM_SENSORS_8937			11
+#define TSENS_NUM_SENSORS_8909			5
 #define TSENS_CONTROLLER_ID(n)			(n)
 #define TSENS_CTRL_ADDR(n)			(n)
 #define TSENS_TM_SN_STATUS(n)			((n) + 0xa0)
@@ -32,6 +33,9 @@
 #define ONE_PT_CALIB2		0x2
 #define TWO_PT_CALIB		0x3
 
+#define SLOPE_FACTOR		1000
+#define SLOPE_DEFAULT		3200
+
 enum tsens_dbg_type {
 	TSENS_DBG_POLL,
 	TSENS_DBG_LOG_TEMP_READS,
@@ -143,6 +147,7 @@
 	struct device			*dev;
 	struct platform_device		*pdev;
 	struct list_head		list;
+	bool				prev_reading_avail;
 	struct regmap			*map;
 	struct regmap_field		*status_field;
 	void __iomem			*tsens_srot_addr;
@@ -158,7 +163,10 @@
 };
 
 extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
-extern const struct tsens_data data_tsens14xx;
+extern const struct tsens_data data_tsens14xx, data_tsens1xxx_8909;
 extern struct list_head tsens_device_list;
 
+extern int calibrate_8937(struct tsens_device *tmdev);
+extern int calibrate_8909(struct tsens_device *tmdev);
+
 #endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens1xxx.c b/drivers/thermal/tsens1xxx.c
index 19e2b5a..d698aed 100644
--- a/drivers/thermal/tsens1xxx.c
+++ b/drivers/thermal/tsens1xxx.c
@@ -33,6 +33,7 @@
 #define TSENS_UPPER_THRESHOLD_SHIFT	10
 
 #define TSENS_S0_STATUS_ADDR(n)		((n) + 0x30)
+#define TSENS_S0_TRDY_ADDR(n)		((n) + 0x5c)
 #define TSENS_SN_ADDR_OFFSET		0x4
 #define TSENS_SN_STATUS_TEMP_MASK	0x3ff
 #define TSENS_SN_STATUS_LOWER_STATUS	BIT(11)
@@ -55,102 +56,6 @@
 #define TSENS_THRESHOLD_MIN_CODE	0x0
 #define TSENS_SCALE_MILLIDEG		1000
 
-/* eeprom layout data for 8937 */
-#define BASE0_MASK	0x000000ff
-#define BASE1_MASK	0xff000000
-#define BASE1_SHIFT	24
-
-#define S0_P1_MASK		0x000001f8
-#define S1_P1_MASK		0x001f8000
-#define S2_P1_MASK_0_4		0xf8000000
-#define S2_P1_MASK_5		0x00000001
-#define S3_P1_MASK		0x00001f80
-#define S4_P1_MASK		0x01f80000
-#define S5_P1_MASK		0x00003f00
-#define S6_P1_MASK		0x03f00000
-#define S7_P1_MASK		0x0000003f
-#define S8_P1_MASK		0x0003f000
-#define S9_P1_MASK		0x0000003f
-#define S10_P1_MASK		0x0003f000
-
-#define S0_P2_MASK		0x00007e00
-#define S1_P2_MASK		0x07e00000
-#define S2_P2_MASK		0x0000007e
-#define S3_P2_MASK		0x0007e000
-#define S4_P2_MASK		0x7e000000
-#define S5_P2_MASK		0x000fc000
-#define S6_P2_MASK		0xfc000000
-#define S7_P2_MASK		0x00000fc0
-#define S8_P2_MASK		0x00fc0000
-#define S9_P2_MASK		0x00000fc0
-#define S10_P2_MASK		0x00fc0000
-
-#define S0_P1_SHIFT     3
-#define S1_P1_SHIFT     15
-#define S2_P1_SHIFT_0_4 27
-#define S2_P1_SHIFT_5   5
-#define S3_P1_SHIFT     7
-#define S4_P1_SHIFT     19
-#define S5_P1_SHIFT     8
-#define S6_P1_SHIFT     20
-#define S8_P1_SHIFT     12
-#define S10_P1_SHIFT    12
-
-#define S0_P2_SHIFT     9
-#define S1_P2_SHIFT     21
-#define S2_P2_SHIFT     1
-#define S3_P2_SHIFT     13
-#define S4_P2_SHIFT     25
-#define S5_P2_SHIFT     14
-#define S6_P2_SHIFT     26
-#define S7_P2_SHIFT     6
-#define S8_P2_SHIFT     18
-#define S9_P2_SHIFT     6
-#define S10_P2_SHIFT    18
-
-#define CAL_SEL_MASK	0x00000007
-
-#define CAL_DEGC_PT1		30
-#define CAL_DEGC_PT2		120
-#define SLOPE_FACTOR		1000
-#define SLOPE_DEFAULT		3200
-
-/*
- * Use this function on devices where slope and offset calculations
- * depend on calibration data read from qfprom. On others the slope
- * and offset values are derived from tz->tzp->slope and tz->tzp->offset
- * resp.
- */
-static void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
-			     u32 *p2, u32 mode)
-{
-	int i;
-	int num, den;
-
-	for (i = 0; i < TSENS_1x_MAX_SENSORS; i++) {
-		pr_debug(
-			"sensor%d - data_point1:%#x data_point2:%#x\n",
-			i, p1[i], p2[i]);
-
-		tmdev->sensor[i].slope = SLOPE_DEFAULT;
-		if (mode == TWO_PT_CALIB) {
-			/*
-			 * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
-			 *	temp_120_degc - temp_30_degc (x2 - x1)
-			 */
-			num = p2[i] - p1[i];
-			num *= SLOPE_FACTOR;
-			den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
-			tmdev->sensor[i].slope = num / den;
-		}
-
-		tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
-				(CAL_DEGC_PT1 *
-				tmdev->sensor[i].slope);
-		pr_debug("offset:%d\n", tmdev->sensor[i].offset);
-	}
-}
-
 static int code_to_degc(u32 adc_code, const struct tsens_sensor *sensor)
 {
 	int degc, num, den;
@@ -184,72 +89,6 @@
 	return code;
 }
 
-static int calibrate_8937(struct tsens_device *tmdev)
-{
-	int base0 = 0, base1 = 0, i;
-	u32 p1[TSENS_1x_MAX_SENSORS], p2[TSENS_1x_MAX_SENSORS];
-	int mode = 0, tmp = 0;
-	u32 qfprom_cdata[5] = {0, 0, 0, 0, 0};
-
-	qfprom_cdata[0] = readl_relaxed(tmdev->tsens_calib_addr + 0x1D8);
-	qfprom_cdata[1] = readl_relaxed(tmdev->tsens_calib_addr + 0x1DC);
-	qfprom_cdata[2] = readl_relaxed(tmdev->tsens_calib_addr + 0x210);
-	qfprom_cdata[3] = readl_relaxed(tmdev->tsens_calib_addr + 0x214);
-	qfprom_cdata[4] = readl_relaxed(tmdev->tsens_calib_addr + 0x230);
-
-	mode = (qfprom_cdata[2] & CAL_SEL_MASK);
-	pr_debug("calibration mode is %d\n", mode);
-
-	switch (mode) {
-	case TWO_PT_CALIB:
-		base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
-		p2[0] = (qfprom_cdata[2] & S0_P2_MASK) >> S0_P2_SHIFT;
-		p2[1] = (qfprom_cdata[2] & S1_P2_MASK) >> S1_P2_SHIFT;
-		p2[2] = (qfprom_cdata[3] & S2_P2_MASK) >> S2_P2_SHIFT;
-		p2[3] = (qfprom_cdata[3] & S3_P2_MASK) >> S3_P2_SHIFT;
-		p2[4] = (qfprom_cdata[3] & S4_P2_MASK) >> S4_P2_SHIFT;
-		p2[5] = (qfprom_cdata[0] & S5_P2_MASK) >> S5_P2_SHIFT;
-		p2[6] = (qfprom_cdata[0] & S6_P2_MASK) >> S6_P2_SHIFT;
-		p2[7] = (qfprom_cdata[1] & S7_P2_MASK) >> S7_P2_SHIFT;
-		p2[8] = (qfprom_cdata[1] & S8_P2_MASK) >> S8_P2_SHIFT;
-		p2[9] = (qfprom_cdata[4] & S9_P2_MASK) >> S9_P2_SHIFT;
-		p2[10] = (qfprom_cdata[4] & S10_P2_MASK) >> S10_P2_SHIFT;
-
-		for (i = 0; i < TSENS_1x_MAX_SENSORS; i++)
-			p2[i] = ((base1 + p2[i]) << 2);
-		/* Fall through */
-	case ONE_PT_CALIB2:
-		base0 = (qfprom_cdata[0] & BASE0_MASK);
-		p1[0] = (qfprom_cdata[2] & S0_P1_MASK) >> S0_P1_SHIFT;
-		p1[1] = (qfprom_cdata[2] & S1_P1_MASK) >> S1_P1_SHIFT;
-		p1[2] = (qfprom_cdata[2] & S2_P1_MASK_0_4) >> S2_P1_SHIFT_0_4;
-		tmp = (qfprom_cdata[3] & S2_P1_MASK_5) << S2_P1_SHIFT_5;
-		p1[2] |= tmp;
-		p1[3] = (qfprom_cdata[3] & S3_P1_MASK) >> S3_P1_SHIFT;
-		p1[4] = (qfprom_cdata[3] & S4_P1_MASK) >> S4_P1_SHIFT;
-		p1[5] = (qfprom_cdata[0] & S5_P1_MASK) >> S5_P1_SHIFT;
-		p1[6] = (qfprom_cdata[0] & S6_P1_MASK) >> S6_P1_SHIFT;
-		p1[7] = (qfprom_cdata[1] & S7_P1_MASK);
-		p1[8] = (qfprom_cdata[1] & S8_P1_MASK) >> S8_P1_SHIFT;
-		p1[9] = (qfprom_cdata[4] & S9_P1_MASK);
-		p1[10] = (qfprom_cdata[4] & S10_P1_MASK) >> S10_P1_SHIFT;
-
-		for (i = 0; i < TSENS_1x_MAX_SENSORS; i++)
-			p1[i] = (((base0) + p1[i]) << 2);
-		break;
-	default:
-		for (i = 0; i < TSENS_1x_MAX_SENSORS; i++) {
-			p1[i] = 500;
-			p2[i] = 780;
-		}
-		break;
-	}
-
-	compute_intercept_slope(tmdev, p1, p2, mode);
-
-	return 0;
-}
-
 static int tsens1xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 {
 	struct tsens_device *tmdev = NULL;
@@ -265,8 +104,21 @@
 
 	tmdev = sensor->tmdev;
 
-	trdy_addr = TSENS_TRDY_ADDR(tmdev->tsens_tm_addr);
-	sensor_addr = TSENS_SN_STATUS_ADDR(tmdev->tsens_tm_addr);
+	if ((tmdev->ctrl_data->ver_major == 1) &&
+			(tmdev->ctrl_data->ver_minor == 1)) {
+		trdy_addr = TSENS_S0_TRDY_ADDR(tmdev->tsens_tm_addr);
+		sensor_addr = TSENS_S0_STATUS_ADDR(tmdev->tsens_tm_addr);
+
+		if (!(tmdev->prev_reading_avail)) {
+			while (!((readl_relaxed(trdy_addr)) & TSENS_TRDY_MASK))
+				usleep_range(TSENS_TRDY_RDY_MIN_TIME,
+						TSENS_TRDY_RDY_MAX_TIME);
+			tmdev->prev_reading_avail = true;
+		}
+	} else {
+		trdy_addr = TSENS_TRDY_ADDR(tmdev->tsens_tm_addr);
+		sensor_addr = TSENS_SN_STATUS_ADDR(tmdev->tsens_tm_addr);
+	}
 
 	code = readl_relaxed(sensor_addr +
 			(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
@@ -486,11 +338,17 @@
 	void __iomem *sensor_status_ctrl_addr;
 	u32 rc = 0, addr_offset;
 
-	sensor_status_addr = TSENS_SN_STATUS_ADDR(tm->tsens_tm_addr);
+
+	if ((tm->ctrl_data->ver_major == 1) &&
+			(tm->ctrl_data->ver_minor == 1))
+		sensor_status_addr = TSENS_S0_STATUS_ADDR(tm->tsens_tm_addr);
+	else
+		sensor_status_addr = TSENS_SN_STATUS_ADDR(tm->tsens_tm_addr);
+
 	sensor_status_ctrl_addr =
 		TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tm->tsens_tm_addr);
 
-	for (i = 0; i < TSENS_1x_MAX_SENSORS; i++) {
+	for (i = 0; i < tm->ctrl_data->num_sensors; i++) {
 		bool upper_thr = false, lower_thr = false;
 
 		if (IS_ERR(tm->sensor[i].tzd))
@@ -581,7 +439,12 @@
 	void __iomem *srot_addr;
 	unsigned int srot_val, sensor_en;
 
-	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	if ((tmdev->ctrl_data->ver_major == 1) &&
+			(tmdev->ctrl_data->ver_minor == 1))
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr);
+	else
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+
 	srot_val = readl_relaxed(srot_addr);
 	srot_val = TSENS_CTRL_SENSOR_EN_MASK(srot_val);
 
@@ -595,7 +458,12 @@
 	void __iomem *srot_addr;
 	unsigned int srot_val;
 
-	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	if ((tmdev->ctrl_data->ver_major == 1) &&
+			(tmdev->ctrl_data->ver_minor == 1))
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr);
+	else
+		srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+
 	srot_val = readl_relaxed(srot_addr);
 	if (!(srot_val & TSENS_EN)) {
 		pr_err("TSENS device is not enabled\n");
@@ -665,9 +533,27 @@
 };
 
 const struct tsens_data data_tsens14xx = {
+	.num_sensors = TSENS_NUM_SENSORS_8937,
 	.ops = &ops_tsens1xxx,
 	.valid_status_check = true,
 	.mtc = true,
 	.ver_major = 1,
 	.ver_minor = 4,
 };
+
+static const struct tsens_ops ops_tsens1xxx_8909 = {
+	.hw_init		= tsens1xxx_hw_init,
+	.get_temp		= tsens1xxx_get_temp,
+	.set_trips		= tsens1xxx_set_trip_temp,
+	.interrupts_reg	= tsens1xxx_register_interrupts,
+	.sensor_en		= tsens1xxx_hw_sensor_en,
+	.calibrate		= calibrate_8909,
+	.dbg			= tsens2xxx_dbg,
+};
+
+const struct tsens_data data_tsens1xxx_8909 = {
+	.num_sensors = TSENS_NUM_SENSORS_8909,
+	.ops = &ops_tsens1xxx_8909,
+	.ver_major = 1,
+	.ver_minor = 1,
+};
diff --git a/drivers/thermal/tsens_calib.c b/drivers/thermal/tsens_calib.c
new file mode 100644
index 0000000..04dd5c5
--- /dev/null
+++ b/drivers/thermal/tsens_calib.c
@@ -0,0 +1,285 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8937 */
+#define BASE0_MASK_8937				0x000000ff
+#define BASE1_MASK_8937				0xff000000
+#define BASE1_SHIFT_8937			24
+
+#define S0_P1_MASK_8937				0x000001f8
+#define S1_P1_MASK_8937				0x001f8000
+#define S2_P1_MASK_0_4_8937			0xf8000000
+#define S2_P1_MASK_5_8937			0x00000001
+#define S3_P1_MASK_8937				0x00001f80
+#define S4_P1_MASK_8937				0x01f80000
+#define S5_P1_MASK_8937				0x00003f00
+#define S6_P1_MASK_8937				0x03f00000
+#define S7_P1_MASK_8937				0x0000003f
+#define S8_P1_MASK_8937				0x0003f000
+#define S9_P1_MASK_8937				0x0000003f
+#define S10_P1_MASK_8937			0x0003f000
+
+#define S0_P2_MASK_8937				0x00007e00
+#define S1_P2_MASK_8937				0x07e00000
+#define S2_P2_MASK_8937				0x0000007e
+#define S3_P2_MASK_8937				0x0007e000
+#define S4_P2_MASK_8937				0x7e000000
+#define S5_P2_MASK_8937				0x000fc000
+#define S6_P2_MASK_8937				0xfc000000
+#define S7_P2_MASK_8937				0x00000fc0
+#define S8_P2_MASK_8937				0x00fc0000
+#define S9_P2_MASK_8937				0x00000fc0
+#define S10_P2_MASK_8937			0x00fc0000
+
+#define S0_P1_SHIFT_8937			3
+#define S1_P1_SHIFT_8937			15
+#define S2_P1_SHIFT_0_4_8937			27
+#define S2_P1_SHIFT_5_8937			5
+#define S3_P1_SHIFT_8937			7
+#define S4_P1_SHIFT_8937			19
+#define S5_P1_SHIFT_8937			8
+#define S6_P1_SHIFT_8937			20
+#define S8_P1_SHIFT_8937			12
+#define S10_P1_SHIFT_8937			12
+
+#define S0_P2_SHIFT_8937			9
+#define S1_P2_SHIFT_8937			21
+#define S2_P2_SHIFT_8937			1
+#define S3_P2_SHIFT_8937			13
+#define S4_P2_SHIFT_8937			25
+#define S5_P2_SHIFT_8937			14
+#define S6_P2_SHIFT_8937			26
+#define S7_P2_SHIFT_8937			6
+#define S8_P2_SHIFT_8937			18
+#define S9_P2_SHIFT_8937			6
+#define S10_P2_SHIFT_8937			18
+
+#define CAL_SEL_MASK_8937			0x00000007
+
+/* eeprom layout for 8909 */
+#define TSENS_EEPROM(n)				((n) + 0xa0)
+#define BASE0_MASK_8909				0x000000ff
+#define BASE1_MASK_8909				0x0000ff00
+
+#define S0_P1_MASK_8909				0x0000003f
+#define S1_P1_MASK_8909				0x0003f000
+#define S2_P1_MASK_8909				0x3f000000
+#define S3_P1_MASK_8909				0x000003f0
+#define S4_P1_MASK_8909				0x003f0000
+
+#define S0_P2_MASK_8909				0x00000fc0
+#define S1_P2_MASK_8909				0x00fc0000
+#define S2_P2_MASK_0_1_8909				0xc0000000
+#define S2_P2_MASK_2_5_8909				0x0000000f
+#define S3_P2_MASK_8909				0x0000fc00
+#define S4_P2_MASK_8909				0x0fc00000
+
+#define TSENS_CAL_SEL_8909				0x00070000
+#define CAL_SEL_SHIFT_8909				16
+#define BASE1_SHIFT_8909				8
+
+#define S1_P1_SHIFT_8909				12
+#define S2_P1_SHIFT_8909				24
+#define S3_P1_SHIFT_8909				4
+#define S4_P1_SHIFT_8909				16
+
+#define S0_P2_SHIFT_8909				6
+#define S1_P2_SHIFT_8909				18
+#define S2_P2_SHIFT_0_1_8909				30
+#define S2_P2_SHIFT_2_5_8909				2
+#define S3_P2_SHIFT_8909				10
+#define S4_P2_SHIFT_8909				22
+
+#define CAL_DEGC_PT1				30
+#define CAL_DEGC_PT2				120
+/*
+ * Use this function on devices where slope and offset calculations
+ * depend on calibration data read from qfprom. On others the slope
+ * and offset values are derived from tz->tzp->slope and tz->tzp->offset
+ * resp.
+ */
+static void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+	u32 *p2, u32 mode)
+{
+	int i;
+	int num, den;
+
+	for (i = 0; i < tmdev->ctrl_data->num_sensors; i++) {
+		pr_debug(
+			"sensor%d - data_point1:%#x data_point2:%#x\n",
+			i, p1[i], p2[i]);
+
+		tmdev->sensor[i].slope = SLOPE_DEFAULT;
+		if (mode == TWO_PT_CALIB) {
+			/*
+			 * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
+			 *	temp_120_degc - temp_30_degc (x2 - x1)
+			 */
+			num = p2[i] - p1[i];
+			num *= SLOPE_FACTOR;
+			den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
+			tmdev->sensor[i].slope = num / den;
+		}
+
+		tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+			(CAL_DEGC_PT1 *
+			tmdev->sensor[i].slope);
+		pr_debug("offset:%d\n", tmdev->sensor[i].offset);
+	}
+}
+
+int calibrate_8937(struct tsens_device *tmdev)
+{
+	int base0 = 0, base1 = 0, i;
+	u32 p1[TSENS_NUM_SENSORS_8937], p2[TSENS_NUM_SENSORS_8937];
+	int mode = 0, tmp = 0;
+	u32 qfprom_cdata[5] = {0, 0, 0, 0, 0};
+
+	qfprom_cdata[0] = readl_relaxed(tmdev->tsens_calib_addr + 0x1D8);
+	qfprom_cdata[1] = readl_relaxed(tmdev->tsens_calib_addr + 0x1DC);
+	qfprom_cdata[2] = readl_relaxed(tmdev->tsens_calib_addr + 0x210);
+	qfprom_cdata[3] = readl_relaxed(tmdev->tsens_calib_addr + 0x214);
+	qfprom_cdata[4] = readl_relaxed(tmdev->tsens_calib_addr + 0x230);
+
+	mode = (qfprom_cdata[2] & CAL_SEL_MASK_8937);
+	pr_debug("calibration mode is %d\n", mode);
+
+	switch (mode) {
+	case TWO_PT_CALIB:
+		base1 = (qfprom_cdata[1] &
+				BASE1_MASK_8937) >> BASE1_SHIFT_8937;
+		p2[0] = (qfprom_cdata[2] &
+				S0_P2_MASK_8937) >> S0_P2_SHIFT_8937;
+		p2[1] = (qfprom_cdata[2] &
+				S1_P2_MASK_8937) >> S1_P2_SHIFT_8937;
+		p2[2] = (qfprom_cdata[3] &
+				S2_P2_MASK_8937) >> S2_P2_SHIFT_8937;
+		p2[3] = (qfprom_cdata[3] &
+				S3_P2_MASK_8937) >> S3_P2_SHIFT_8937;
+		p2[4] = (qfprom_cdata[3] &
+				S4_P2_MASK_8937) >> S4_P2_SHIFT_8937;
+		p2[5] = (qfprom_cdata[0] &
+				S5_P2_MASK_8937) >> S5_P2_SHIFT_8937;
+		p2[6] = (qfprom_cdata[0] &
+				S6_P2_MASK_8937) >> S6_P2_SHIFT_8937;
+		p2[7] = (qfprom_cdata[1] &
+				S7_P2_MASK_8937) >> S7_P2_SHIFT_8937;
+		p2[8] = (qfprom_cdata[1] &
+				S8_P2_MASK_8937) >> S8_P2_SHIFT_8937;
+		p2[9] = (qfprom_cdata[4] &
+				S9_P2_MASK_8937) >> S9_P2_SHIFT_8937;
+		p2[10] = (qfprom_cdata[4] &
+				S10_P2_MASK_8937) >> S10_P2_SHIFT_8937;
+
+		for (i = 0; i < TSENS_NUM_SENSORS_8937; i++)
+			p2[i] = ((base1 + p2[i]) << 2);
+		/* Fall through */
+	case ONE_PT_CALIB2:
+		base0 = (qfprom_cdata[0] & BASE0_MASK_8937);
+		p1[0] = (qfprom_cdata[2] &
+				S0_P1_MASK_8937) >> S0_P1_SHIFT_8937;
+		p1[1] = (qfprom_cdata[2] &
+				S1_P1_MASK_8937) >> S1_P1_SHIFT_8937;
+		p1[2] = (qfprom_cdata[2] &
+				S2_P1_MASK_0_4_8937) >> S2_P1_SHIFT_0_4_8937;
+		tmp = (qfprom_cdata[3] &
+				S2_P1_MASK_5_8937) << S2_P1_SHIFT_5_8937;
+		p1[2] |= tmp;
+		p1[3] = (qfprom_cdata[3] &
+				S3_P1_MASK_8937) >> S3_P1_SHIFT_8937;
+		p1[4] = (qfprom_cdata[3] &
+				S4_P1_MASK_8937) >> S4_P1_SHIFT_8937;
+		p1[5] = (qfprom_cdata[0] &
+				S5_P1_MASK_8937) >> S5_P1_SHIFT_8937;
+		p1[6] = (qfprom_cdata[0] &
+				S6_P1_MASK_8937) >> S6_P1_SHIFT_8937;
+		p1[7] = (qfprom_cdata[1] & S7_P1_MASK_8937);
+		p1[8] = (qfprom_cdata[1] &
+				S8_P1_MASK_8937) >> S8_P1_SHIFT_8937;
+		p1[9] = (qfprom_cdata[4] & S9_P1_MASK_8937);
+		p1[10] = (qfprom_cdata[4] &
+				S10_P1_MASK_8937) >> S10_P1_SHIFT_8937;
+
+		for (i = 0; i < TSENS_NUM_SENSORS_8937; i++)
+			p1[i] = (((base0) + p1[i]) << 2);
+		break;
+	default:
+		for (i = 0; i < TSENS_NUM_SENSORS_8937; i++) {
+			p1[i] = 500;
+			p2[i] = 780;
+		}
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+
+	return 0;
+}
+
+int calibrate_8909(struct tsens_device *tmdev)
+{
+	int i, base0 = 0, base1 = 0;
+	u32 p1[TSENS_NUM_SENSORS_8909], p2[TSENS_NUM_SENSORS_8909];
+	int mode = 0, temp = 0;
+	uint32_t calib_data[3] = {0, 0, 0};
+
+	calib_data[0] = readl_relaxed(
+		TSENS_EEPROM(tmdev->tsens_calib_addr));
+	calib_data[1] = readl_relaxed(
+		(TSENS_EEPROM(tmdev->tsens_calib_addr) + 0x4));
+	calib_data[2] = readl_relaxed(
+		(TSENS_EEPROM(tmdev->tsens_calib_addr) + 0x3c));
+	mode = (calib_data[2] & TSENS_CAL_SEL_8909) >> CAL_SEL_SHIFT_8909;
+
+	pr_debug("calib mode is %d\n", mode);
+
+	switch (mode) {
+	case TWO_PT_CALIB:
+		base1 = (calib_data[2] & BASE1_MASK_8909) >> BASE1_SHIFT_8909;
+		p2[0] = (calib_data[0] & S0_P2_MASK_8909) >> S0_P2_SHIFT_8909;
+		p2[1] = (calib_data[0] & S1_P2_MASK_8909) >> S1_P2_SHIFT_8909;
+		p2[2] = (calib_data[0] &
+				S2_P2_MASK_0_1_8909) >> S2_P2_SHIFT_0_1_8909;
+		temp  = (calib_data[1] &
+				S2_P2_MASK_2_5_8909) << S2_P2_SHIFT_2_5_8909;
+		p2[2] |= temp;
+		p2[3] = (calib_data[1] & S3_P2_MASK_8909) >> S3_P2_SHIFT_8909;
+		p2[4] = (calib_data[1] & S4_P2_MASK_8909) >> S4_P2_SHIFT_8909;
+
+		for (i = 0; i < TSENS_NUM_SENSORS_8909; i++)
+			p2[i] = ((base1 + p2[i]) << 2);
+	/* Fall through */
+	case ONE_PT_CALIB2:
+		base0 = (calib_data[2] & BASE0_MASK_8909);
+		p1[0] = (calib_data[0] & S0_P1_MASK_8909);
+		p1[1] = (calib_data[0] & S1_P1_MASK_8909) >> S1_P1_SHIFT_8909;
+		p1[2] = (calib_data[0] & S2_P1_MASK_8909) >> S2_P1_SHIFT_8909;
+		p1[3] = (calib_data[1] & S3_P1_MASK_8909) >> S3_P1_SHIFT_8909;
+		p1[4] = (calib_data[1] & S4_P1_MASK_8909) >> S4_P1_SHIFT_8909;
+		for (i = 0; i < TSENS_NUM_SENSORS_8909; i++)
+			p1[i] = (((base0) + p1[i]) << 2);
+		break;
+	default:
+		for (i = 0; i < TSENS_NUM_SENSORS_8909; i++) {
+			p1[i] = 500;
+			p2[i] = 780;
+		}
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+	return 0;
+}
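
The calibration helpers above all feed compute_intercept_slope(), which turns the two fused ADC codes (taken at 30 degC and 120 degC) into a scaled slope and an offset. Below is a standalone rework of that arithmetic with made-up codes so the integer math can be checked by hand; p1/p2 are hypothetical values, not real fuse data:

#include <stdio.h>

#define SLOPE_FACTOR	1000
#define CAL_DEGC_PT1	30
#define CAL_DEGC_PT2	120

int main(void)
{
	int p1 = 505, p2 = 615;		/* illustrative calibration codes */
	int slope = (p2 - p1) * SLOPE_FACTOR / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	int offset = p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;

	/* (615 - 505) * 1000 / 90 = 1222; 505000 - 30 * 1222 = 468340 */
	printf("slope=%d offset=%d\n", slope, offset);
	return 0;
}
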
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fe22917..9e9016e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@
 	struct mutex mutex;
 
 	/* Link layer */
+	int mode;
+#define DLCI_MODE_ABM		0	/* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM		1	/* Asynchronous Disconnected Mode */
 	spinlock_t lock;	/* Protects the internal state */
 	struct timer_list t1;	/* Retransmit timer for SABM and UA */
 	int retries;
@@ -1380,7 +1383,13 @@
 	ctrl->data = data;
 	ctrl->len = clen;
 	gsm->pending_cmd = ctrl;
-	gsm->cretries = gsm->n2;
+
+	/* If DLCI0 is in ADM mode skip retries, it won't respond */
+	if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+		gsm->cretries = 1;
+	else
+		gsm->cretries = gsm->n2;
+
 	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
 	gsm_control_transmit(gsm, ctrl);
 	spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1488,6 +1497,7 @@
 			if (debug & 8)
 				pr_info("DLCI %d opening in ADM mode.\n",
 					dlci->addr);
+			dlci->mode = DLCI_MODE_ADM;
 			gsm_dlci_open(dlci);
 		} else {
 			gsm_dlci_close(dlci);
@@ -2865,11 +2875,22 @@
 static int gsm_carrier_raised(struct tty_port *port)
 {
 	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+	struct gsm_mux *gsm = dlci->gsm;
+
 	/* Not yet open so no carrier info */
 	if (dlci->state != DLCI_OPEN)
 		return 0;
 	if (debug & 2)
 		return 1;
+
+	/*
+	 * Basic mode with control channel in ADM mode may not respond
+	 * to CMD_MSC at all and modem_rx is empty.
+	 */
+	if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+	    !dlci->modem_rx)
+		return 1;
+
 	return dlci->modem_rx & TIOCM_CD;
 }
 
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f6e4373..5d9038a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1815,7 +1815,8 @@
 
 	status = serial_port_in(port, UART_LSR);
 
-	if (status & (UART_LSR_DR | UART_LSR_BI)) {
+	if (status & (UART_LSR_DR | UART_LSR_BI) &&
+	    iir & UART_IIR_RDI) {
 		if (!up->dma || handle_rx_dma(up, iir))
 			status = serial8250_rx_chars(up, status);
 	}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 5ac06fc..fec48de 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -596,6 +596,11 @@
 	if (dev_id < 0)
 		dev_id = 0;
 
+	if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
+		return -EINVAL;
+	}
+
 	uart = &arc_uart_ports[dev_id];
 	port = &uart->port;
 
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 3b31fd8..b9a4625 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -172,7 +172,7 @@
  */
 int __init setup_earlycon(char *buf)
 {
-	const struct earlycon_id *match;
+	const struct earlycon_id **p_match;
 
 	if (!buf || !buf[0])
 		return -EINVAL;
@@ -180,7 +180,9 @@
 	if (early_con.flags & CON_ENABLED)
 		return -EALREADY;
 
-	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+	     p_match++) {
+		const struct earlycon_id *match = *p_match;
 		size_t len = strlen(match->name);
 
 		if (strncmp(buf, match->name, len))
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 76103f2..937f5e1 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1902,6 +1902,10 @@
 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
 		return ret;
 	}
+	if (ret >= ARRAY_SIZE(lpuart_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", ret);
+		return -EINVAL;
+	}
 	sport->port.line = ret;
 	sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
 
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index f575a33..b24edf6 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2080,6 +2080,12 @@
 	else if (ret < 0)
 		return ret;
 
+	if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n",
+			sport->port.line);
+		return -EINVAL;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
@@ -2145,7 +2151,9 @@
 		 * and DCD (when they are outputs) or enables the respective
 		 * irqs. So set this bit early, i.e. before requesting irqs.
 		 */
-		writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+		reg = readl(sport->port.membase + UFCR);
+		if (!(reg & UFCR_DCEDTE))
+			writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
 
 		/*
 		 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2156,7 +2164,15 @@
 		       sport->port.membase + UCR3);
 
 	} else {
-		writel(0, sport->port.membase + UFCR);
+		unsigned long ucr3 = UCR3_DSR;
+
+		reg = readl(sport->port.membase + UFCR);
+		if (reg & UFCR_DCEDTE)
+			writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+		if (!is_imx1_uart(sport))
+			ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+		writel(ucr3, sport->port.membase + UCR3);
 	}
 
 	clk_disable_unprepare(sport->clk_ipg);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 8e5d100..4e895ee 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -617,7 +617,6 @@
 static void msm_geni_serial_complete_rx_eot(struct uart_port *uport)
 {
 	int poll_done = 0, tries = 0;
-	u32 geni_status = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
 	do {
@@ -626,11 +625,11 @@
 		tries++;
 	} while (!poll_done && tries < 5);
 
-	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
-
 	if (!poll_done)
-		IPC_LOG_MSG(port->ipc_log_misc, "%s: RX_EOT, GENI:0x%x\n",
-							__func__, geni_status);
+		IPC_LOG_MSG(port->ipc_log_misc,
+		"%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__,
+		geni_read_reg_nolog(uport->membase, SE_GENI_STATUS),
+		geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0));
 	else
 		geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR);
 }
@@ -1131,7 +1130,9 @@
 	 * cancel control bit.
 	 */
 	mb();
-	msm_geni_serial_complete_rx_eot(uport);
+	if (!uart_console(uport))
+		msm_geni_serial_complete_rx_eot(uport);
+
 	done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
 					S_GENI_CMD_CANCEL, false);
 	if (done) {
@@ -1856,6 +1857,7 @@
 	unsigned long ser_clk_cfg = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 	unsigned long clk_rate;
+	unsigned long flags;
 
 	if (!uart_console(uport)) {
 		int ret = msm_geni_serial_power_on(uport);
@@ -1867,7 +1869,13 @@
 			return;
 		}
 	}
+	/* Take the spinlock, otherwise stop_rx races with the ISR due to
+	 * Cancel and FSM_RESET. It also potentially races with the ISR's
+	 * dma_map/unmap operations.
+	 */
+	spin_lock_irqsave(&uport->lock, flags);
 	msm_geni_serial_stop_rx(uport);
+	spin_unlock_irqrestore(&uport->lock, flags);
 	/* baud rate */
 	baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
 	port->cur_baud = baud;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index efaac5e..69fa4dc 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -395,10 +395,22 @@
 
 static inline void msm_wait_for_xmitr(struct uart_port *port)
 {
+	u32 count = 500000;
+
 	while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
 		if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
 			break;
 		udelay(1);
+
+		/* In the worst case this loop would spin forever waiting
+		 * for TX ready; use a 500ms timeout so we only lose some
+		 * log output to the uart instead of getting stuck here.
+		 */
+		if (count-- == 0) {
+			msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+			printk_deferred("uart may lose data, resetting TX!\n");
+			break;
+		}
 	}
 	msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
 }
@@ -1858,10 +1870,35 @@
 
 	return 0;
 }
+
+static int msm_serial_freeze(struct device *dev)
+{
+	struct uart_port *port = dev_get_drvdata(dev);
+	struct msm_port *msm_port = UART_TO_MSM(port);
+	int ret;
+
+	ret = msm_serial_suspend(dev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set the rate as recommended to avoid issues where the clock
+	 * driver skips reconfiguring the clock hardware during
+	 * hibernation resume.
+	 */
+	return clk_set_rate(msm_port->clk, 19200000);
+}
 #endif
 
 static const struct dev_pm_ops msm_serial_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
+#ifdef CONFIG_PM_SLEEP
+	.suspend = msm_serial_suspend,
+	.resume = msm_serial_resume,
+	.freeze = msm_serial_freeze,
+	.thaw = msm_serial_resume,
+	.poweroff = msm_serial_suspend,
+	.restore = msm_serial_resume,
+#endif
 };
 
 static struct platform_driver msm_platform_driver = {
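
SET_SYSTEM_SLEEP_PM_OPS() is dropped above because, under CONFIG_PM_SLEEP, it points freeze/thaw/poweroff/restore at the same suspend/resume pair; spelling the callbacks out is what lets hibernation's freeze step do extra work (the clk_set_rate() call) that ordinary suspend does not. A sketch of the two shapes side by side; the example_* handler names are placeholders.

static int example_suspend(struct device *dev);
static int example_resume(struct device *dev);
static int example_freeze(struct device *dev);	/* suspend + clk_set_rate() */

/* What SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume) amounts to: */
static const struct dev_pm_ops macro_equivalent_pm_ops = {
	.suspend  = example_suspend,
	.resume   = example_resume,
	.freeze   = example_suspend,
	.thaw     = example_resume,
	.poweroff = example_suspend,
	.restore  = example_resume,
};

/* Spelling the callbacks out lets hibernation's freeze step differ: */
static const struct dev_pm_ops explicit_pm_ops = {
	.suspend  = example_suspend,
	.resume   = example_resume,
	.freeze   = example_freeze,
	.thaw     = example_resume,
	.poweroff = example_suspend,
	.restore  = example_resume,
};
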
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 07390f8..1d9d778 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1664,6 +1664,10 @@
 		s->port.line = pdev->id < 0 ? 0 : pdev->id;
 	else if (ret < 0)
 		return ret;
+	if (s->port.line >= ARRAY_SIZE(auart_port)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
+		return -EINVAL;
+	}
 
 	if (of_id) {
 		pdev->id_entry = of_id->data;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index d65f92b..f2ab6d8a 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1813,6 +1813,10 @@
 
 	dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
 
+	if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
+		dev_err(&pdev->dev, "serial%d out of range\n", index);
+		return -EINVAL;
+	}
 	ourport = &s3c24xx_serial_ports[index];
 
 	ourport->drv_data = s3c24xx_get_driver_data(pdev);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index cdd2f94..b9c7a90 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -889,7 +889,16 @@
 			goto err_out;
 		uartclk = 0;
 	} else {
-		clk_prepare_enable(clk);
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			goto err_out;
+
+		ret = devm_add_action_or_reset(&pdev->dev,
+				(void(*)(void *))clk_disable_unprepare,
+				clk);
+		if (ret)
+			goto err_out;
+
 		uartclk = clk_get_rate(clk);
 	}
 
@@ -988,7 +997,7 @@
 	uart_unregister_driver(&s->uart);
 err_out:
 	if (!IS_ERR(s->regulator))
-		return regulator_disable(s->regulator);
+		regulator_disable(s->regulator);
 
 	return ret;
 }
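
The clk cleanup registered above leans on casting clk_disable_unprepare() to void (*)(void *); the more conventional rendering of the same devm_add_action_or_reset() idiom uses a small typed wrapper. A hedged sketch, with illustrative example_* names:

static void example_clk_disable_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/*
	 * On success this registers the matching cleanup; on failure,
	 * devm_add_action_or_reset() runs the action immediately and
	 * returns the error, so no manual unwind is needed.
	 */
	return devm_add_action_or_reset(dev, example_clk_disable_unprepare,
					clk);
}
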
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index dd4c02f..7497f1d 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1106,7 +1106,7 @@
 	struct uart_port *port;
 
 	/* Try the given port id if failed use default method */
-	if (cdns_uart_port[id].mapbase != 0) {
+	if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
 		/* Find the next unused port */
 		for (id = 0; id < CDNS_UART_NR_PORTS; id++)
 			if (cdns_uart_port[id].mapbase == 0)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4ee0a9d..789c814 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3170,7 +3170,10 @@
 
 	kref_init(&tty->kref);
 	tty->magic = TTY_MAGIC;
-	tty_ldisc_init(tty);
+	if (tty_ldisc_init(tty)) {
+		kfree(tty);
+		return NULL;
+	}
 	tty->session = NULL;
 	tty->pgrp = NULL;
 	mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3a9e2a2..4ab518d 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -175,12 +175,11 @@
 			return ERR_CAST(ldops);
 	}
 
-	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
-	if (ld == NULL) {
-		put_ldops(ldops);
-		return ERR_PTR(-ENOMEM);
-	}
-
+	/*
+	 * There is no way to handle allocation failure of only 16 bytes.
+	 * Let's simplify error handling and save more memory.
+	 */
+	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
 	ld->ops = ldops;
 	ld->tty = tty;
 
@@ -753,12 +752,13 @@
  *	the tty structure is not completely set up when this call is made.
  */
 
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
 {
 	struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
 	if (IS_ERR(ld))
-		panic("n_tty: init_tty");
+		return PTR_ERR(ld);
 	tty->ldisc = ld;
+	return 0;
 }
 
 /**
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d23cc..fe22ac7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -174,6 +174,7 @@
 		wb = &acm->wb[wbn];
 		if (!wb->use) {
 			wb->use = 1;
+			wb->len = 0;
 			return wbn;
 		}
 		wbn = (wbn + 1) % ACM_NW;
@@ -731,16 +732,18 @@
 static void acm_tty_flush_chars(struct tty_struct *tty)
 {
 	struct acm *acm = tty->driver_data;
-	struct acm_wb *cur = acm->putbuffer;
+	struct acm_wb *cur;
 	int err;
 	unsigned long flags;
 
+	spin_lock_irqsave(&acm->write_lock, flags);
+
+	cur = acm->putbuffer;
 	if (!cur) /* nothing to do */
-		return;
+		goto out;
 
 	acm->putbuffer = NULL;
 	err = usb_autopm_get_interface_async(acm->control);
-	spin_lock_irqsave(&acm->write_lock, flags);
 	if (err < 0) {
 		cur->use = 0;
 		acm->putbuffer = cur;
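
The acm_tty_flush_chars() fix above is a check-then-claim race repair: the putbuffer pointer was read and tested before taking the write_lock that protects it. The general shape, as a hedged sketch with illustrative example_* structure names:

struct example_buf;

struct example_dev {
	spinlock_t lock;
	struct example_buf *pending;
};

static void example_flush(struct example_dev *dev)
{
	struct example_buf *buf;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	/* read and claim the pointer only while holding its lock */
	buf = dev->pending;
	if (!buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	dev->pending = NULL;

	/* ... submit buf, still serialised against concurrent writers ... */

	spin_unlock_irqrestore(&dev->lock, flags);
}
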
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0dce6ab..876679a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -186,7 +186,9 @@
 static const unsigned short high_speed_maxpacket_maxes[4] = {
 	[USB_ENDPOINT_XFER_CONTROL] = 64,
 	[USB_ENDPOINT_XFER_ISOC] = 1024,
-	[USB_ENDPOINT_XFER_BULK] = 512,
+
+	/* Bulk should be 512, but some devices use 1024: we will warn below */
+	[USB_ENDPOINT_XFER_BULK] = 1024,
 	[USB_ENDPOINT_XFER_INT] = 1024,
 };
 static const unsigned short super_speed_maxpacket_maxes[4] = {
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ee33c0d..5532246 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1495,9 +1495,10 @@
 	 * Some buses would like to keep their devices in suspend
 	 * state after system resume.  Their resume happen when
 	 * a remote wakeup is detected or interface driver start
-	 * I/O.
+	 * I/O. And in the case when the system is restoring from
+	 * hibernation, make sure all the devices are resumed.
 	 */
-	if (udev->bus->skip_resume)
+	if (udev->bus->skip_resume && msg.event != PM_EVENT_RESTORE)
 		return 0;
 
 	/* For all calls, take the device back to full power and
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e4b39a7..6a4ea98 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2430,6 +2430,7 @@
 
 	spin_lock_irqsave (&hcd_root_hub_lock, flags);
 	if (hcd->rh_registered) {
+		pm_wakeup_event(&hcd->self.root_hub->dev, 0);
 		set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 		queue_work(pm_wq, &hcd->wakeup_work);
 	}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a9117ee..a9b3bbd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -659,12 +659,17 @@
 		unsigned int portnum)
 {
 	struct usb_hub *hub;
+	struct usb_port *port_dev;
 
 	if (!hdev)
 		return;
 
 	hub = usb_hub_to_struct_hub(hdev);
 	if (hub) {
+		port_dev = hub->ports[portnum - 1];
+		if (port_dev && port_dev->child)
+			pm_wakeup_event(&port_dev->child->dev, 0);
+
 		set_bit(portnum, hub->wakeup_bits);
 		kick_hub_wq(hub);
 	}
@@ -3428,8 +3433,11 @@
 
 	/* Skip the initial Clear-Suspend step for a remote wakeup */
 	status = hub_port_status(hub, port1, &portstatus, &portchange);
-	if (status == 0 && !port_is_suspended(hub, portstatus))
+	if (status == 0 && !port_is_suspended(hub, portstatus)) {
+		if (portchange & USB_PORT_STAT_C_SUSPEND)
+			pm_wakeup_event(&udev->dev, 0);
 		goto SuspendCleared;
+	}
 
 	/* see 7.1.7.7; affects power usage, but not budgeting */
 	if (hub_is_superspeed(hub->hdev))
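
The hcd.c and hub.c hunks add the same ingredient: report a wakeup event against the responsible device before the deferred resume work is queued, so a system suspend racing with the remote wakeup notices it instead of sleeping through it. A minimal sketch of the pattern; the example_* names are illustrative.

static void example_report_remote_wakeup(struct device *waking_dev,
					 struct work_struct *resume_work)
{
	/*
	 * msec == 0 reports the event without keeping the wakeup source
	 * active afterwards; it is enough for an in-flight suspend to
	 * notice that a wakeup happened and back out.
	 */
	pm_wakeup_event(waking_dev, 0);

	/* the actual resume handling still runs from the workqueue */
	schedule_work(resume_work);
}
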
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 4f1c6f8..40ce175 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@
 	{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
 			USB_QUIRK_STRING_FETCH_255 },
 
+	/* HP v222w 16GB Mini USB Drive */
+	{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
 	/* Creative SB Audigy 2 NX */
 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a04..0f45a2f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -209,7 +209,7 @@
 	unsigned char           dir_in;
 	unsigned char           index;
 	unsigned char           mc;
-	unsigned char           interval;
+	u16                     interval;
 
 	unsigned int            halted:1;
 	unsigned int            periodic:1;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index cfdd5c3..09921ef 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2642,12 +2642,6 @@
 	dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
 	       DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
 
-	dwc2_hsotg_enqueue_setup(hsotg);
-
-	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
-		dwc2_readl(hsotg->regs + DIEPCTL0),
-		dwc2_readl(hsotg->regs + DOEPCTL0));
-
 	/* clear global NAKs */
 	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
 	if (!is_usb_reset)
@@ -2658,6 +2652,12 @@
 	mdelay(3);
 
 	hsotg->lx_state = DWC2_L0;
+
+	dwc2_hsotg_enqueue_setup(hsotg);
+
+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		dwc2_readl(hsotg->regs + DIEPCTL0),
+		dwc2_readl(hsotg->regs + DOEPCTL0));
 }
 
 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 919a321..0a0cf15 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2268,10 +2268,22 @@
  */
 static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 {
-	u32 hcfg, hfir, otgctl;
+	u32 hcfg, hfir, otgctl, usbcfg;
 
 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
 
+	/* Set HS/FS Timeout Calibration to 7 (max available value).
+	 * The number of PHY clocks that the application programs in
+	 * this field is added to the high/full speed interpacket timeout
+	 * duration in the core to account for any additional delays
+	 * introduced by the PHY. This can be required, because the delay
+	 * introduced by the PHY in generating the linestate condition
+	 * can vary from one PHY to another.
+	 */
+	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+	usbcfg |= GUSBCFG_TOUTCAL(7);
+	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+
 	/* Restart the Phy Clock */
 	dwc2_writel(0, hsotg->regs + PCGCTL);
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ef3f542..93d8e14 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -910,6 +910,21 @@
 	}
 
 	/*
+	 * Workaround for STAR 9001285599, which affects dwc3 core version
+	 * 3.20a only. If the PM TIMER ECN is enabled through GUCTL2[19],
+	 * the link compliance test (TD7.21) may fail. If the ECN is not
+	 * enabled (GUCTL2[19] = 0), the controller uses the old timer value
+	 * (5us), which is still fine for the link compliance test. Hence do
+	 * not enable the PM TIMER ECN in v3.20a by default; keep
+	 * GUCTL2[19] = 0 instead.
+	 */
+	if (dwc->revision == DWC3_REVISION_320A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+		reg &= ~DWC3_GUCTL2_LC_TIMER;
+		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+	}
+
+	/*
 	 * Enable hardware control of sending remote wakeup in HS when
 	 * the device is in the L1 state.
 	 */
@@ -1567,6 +1582,19 @@
 	return 0;
 }
 
+static int dwc3_pm_restore(struct device *dev)
+{
+	/*
+	 * Set the core as runtime active to prevent the runtime
+	 * PM ops being called before the PM restore is completed.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+}
+
 static int dwc3_resume(struct device *dev)
 {
 	struct dwc3	*dwc = dev_get_drvdata(dev);
@@ -1591,7 +1619,12 @@
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct dev_pm_ops dwc3_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+	.suspend	= dwc3_suspend,
+	.resume		= dwc3_resume,
+	.freeze		= dwc3_suspend,
+	.thaw		= dwc3_pm_restore,
+	.poweroff	= dwc3_suspend,
+	.restore	= dwc3_pm_restore,
 	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
 			dwc3_runtime_idle)
 };
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e5fe7a4..0963aa3 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -269,6 +269,8 @@
 #define DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE	(1 << 0)
 
 /* Global TX Fifo Size Register */
+#define DWC31_GTXFIFOSIZ_TXFRAMNUM	BIT(15)		/* DWC_usb31 only */
+#define DWC31_GTXFIFOSIZ_TXFDEF(n)	((n) & 0x7fff)	/* DWC_usb31 only */
 #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
 #define DWC3_GTXFIFOSIZ_TXFSTADDR(n)	((n) & 0xffff0000)
 
@@ -333,6 +335,7 @@
 #define DWC3_GUCTL2_RST_ACTBITLATER		(1 << 14)
 #define DWC3_GUCTL2_HP_TIMER(n)			((n) << 21)
 #define DWC3_GUCTL2_HP_TIMER_MASK		DWC3_GUCTL2_HP_TIMER(0x1f)
+#define DWC3_GUCTL2_LC_TIMER			(1 << 19)
 
 /* Device Configuration Register */
 #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 810546a..bf1baf6 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -73,6 +73,8 @@
 
 /* XHCI registers */
 #define USB3_HCSPARAMS1		(0x4)
+#define USB3_HCCPARAMS2		(0x1c)
+#define HCC_CTC(p)		((p) & (1 << 3))
 #define USB3_PORTSC		(0x420)
 
 /**
@@ -255,6 +257,7 @@
 	struct notifier_block	host_restart_nb;
 
 	struct notifier_block	host_nb;
+	bool			xhci_ss_compliance_enable;
 
 	atomic_t                in_p3;
 	unsigned int		lpm_to_suspend_delay;
@@ -2093,6 +2096,19 @@
 	dwc3_core_init(dwc);
 	/* Re-configure event buffers */
 	dwc3_event_buffers_setup(dwc);
+
+	/* Get initial P3 status and enable IN_P3 event */
+	val = dwc3_msm_read_reg_field(mdwc->base,
+		DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+	atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3);
+	dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+				PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+
+	if (mdwc->otg_state == OTG_STATE_A_HOST) {
+		dev_dbg(mdwc->dev, "%s: set the core in host mode\n",
+							__func__);
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+	}
 }
 
 static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
@@ -2246,7 +2262,7 @@
 	}
 }
 
-static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
 {
 	int ret;
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2363,8 +2379,8 @@
 	clk_disable_unprepare(mdwc->xo_clk);
 
 	/* Perform controller power collapse */
-	if (!mdwc->in_host_mode && (!mdwc->in_device_mode ||
-					mdwc->in_restart)) {
+	if ((!mdwc->in_host_mode && (!mdwc->in_device_mode ||
+					mdwc->in_restart)) || hibernation) {
 		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
@@ -2520,8 +2536,6 @@
 
 	/* Recover from controller power collapse */
 	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
-		u32 tmp;
-
 		if (mdwc->iommu_map) {
 			ret = arm_iommu_attach_device(mdwc->dev,
 					mdwc->iommu_map);
@@ -2536,13 +2550,6 @@
 
 		dwc3_msm_power_collapse_por(mdwc);
 
-		/* Get initial P3 status and enable IN_P3 event */
-		tmp = dwc3_msm_read_reg_field(mdwc->base,
-			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
-		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
-		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
-					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
-
 		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
 	}
 
@@ -2821,8 +2828,11 @@
 	int ret;
 
 	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
-	if (IS_ERR(mdwc->dwc3_gdsc))
+	if (IS_ERR(mdwc->dwc3_gdsc)) {
+		if (PTR_ERR(mdwc->dwc3_gdsc) == -EPROBE_DEFER)
+			return PTR_ERR(mdwc->dwc3_gdsc);
 		mdwc->dwc3_gdsc = NULL;
+	}
 
 	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
 	if (IS_ERR(mdwc->xo_clk)) {
@@ -3279,6 +3289,35 @@
 static DEVICE_ATTR_RW(usb_compliance_mode);
 
 
+static ssize_t xhci_link_compliance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (mdwc->xhci_ss_compliance_enable)
+		return snprintf(buf, PAGE_SIZE, "y\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "n\n");
+}
+
+static ssize_t xhci_link_compliance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	bool value;
+	int ret;
+
+	ret = strtobool(buf, &value);
+	if (!ret) {
+		mdwc->xhci_ss_compliance_enable = value;
+		return count;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR_RW(xhci_link_compliance);
+
 static int dwc3_msm_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node, *dwc3_node;
@@ -3636,6 +3675,7 @@
 	device_create_file(&pdev->dev, &dev_attr_mode);
 	device_create_file(&pdev->dev, &dev_attr_speed);
 	device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
+	device_create_file(&pdev->dev, &dev_attr_xhci_link_compliance);
 
 	host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
 	if (!dwc->is_drd && host_mode) {
@@ -3674,6 +3714,7 @@
 	int ret_pm;
 
 	device_remove_file(&pdev->dev, &dev_attr_mode);
+	device_remove_file(&pdev->dev, &dev_attr_xhci_link_compliance);
 	if (mdwc->usb_psy)
 		power_supply_put(mdwc->usb_psy);
 
@@ -3940,6 +3981,25 @@
 		}
 
 		/*
+		 * If the Compliance Transition Capability (CTC) flag in the
+		 * HCCPARAMS2 register is set and the user has enabled the
+		 * xhci_link_compliance sysfs param for the SuperSpeed host
+		 * controller, write 10 (Compliance Mode) into the Port Link
+		 * State (PLS) field of the USB 3.0 controller's PORTSC
+		 * register, which sits at offset USB3_PORTSC + 0x10 from the
+		 * DWC3 base address. Also disable runtime PM of the USB 3.0
+		 * root hub (the root hub of the xhci device's shared_hcd).
+		 */
+		if (HCC_CTC(dwc3_msm_read_reg(mdwc->base, USB3_HCCPARAMS2))
+				&& mdwc->xhci_ss_compliance_enable
+				&& dwc->maximum_speed == USB_SPEED_SUPER) {
+			dwc3_msm_write_reg(mdwc->base, USB3_PORTSC + 0x10,
+					0x10340);
+			pm_runtime_disable(&hcd_to_xhci(platform_get_drvdata(
+				dwc->xhci))->shared_hcd->self.root_hub->dev);
+		}
+
+		/*
 		 * In some cases it is observed that USB PHY is not going into
 		 * suspend with host mode suspend functionality. Hence disable
 		 * XHCI's runtime PM here if disable_host_mode_pm is set.
@@ -4401,7 +4461,39 @@
 		return -EBUSY;
 	}
 
-	ret = dwc3_msm_suspend(mdwc);
+	ret = dwc3_msm_suspend(mdwc, false);
+	if (!ret)
+		atomic_set(&mdwc->pm_suspended, 1);
+
+	return ret;
+}
+
+static int dwc3_msm_pm_freeze(struct device *dev)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(dev, "dwc3-msm PM freeze\n");
+	dbg_event(0xFF, "PM Freeze", 0);
+
+	flush_workqueue(mdwc->dwc3_wq);
+
+	/* Resume the core to make sure we can power collapse it */
+	ret = dwc3_msm_resume(mdwc);
+
+	/*
+	 * The PHYs also need to be power collapsed, so call notify_disconnect
+	 * before suspending to make sure that happens.
+	 */
+	usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+	mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	if (mdwc->ss_phy) {
+		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+	}
+
+	ret = dwc3_msm_suspend(mdwc, true);
 	if (!ret)
 		atomic_set(&mdwc->pm_suspended, 1);
 
@@ -4425,6 +4517,35 @@
 
 	return 0;
 }
+
+static int dwc3_msm_pm_restore(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(dev, "dwc3-msm PM restore\n");
+	dbg_event(0xFF, "PM Restore", 0);
+
+	atomic_set(&mdwc->pm_suspended, 0);
+
+	dwc3_msm_resume(mdwc);
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	/* Restore PHY flags if hibernated in host mode */
+	if (mdwc->otg_state == OTG_STATE_A_HOST) {
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+		mdwc->hs_phy->flags |= PHY_HOST_MODE;
+		if (mdwc->ss_phy) {
+			usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
+			mdwc->ss_phy->flags |= PHY_HOST_MODE;
+		}
+	}
+
+
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -4447,7 +4568,7 @@
 	dev_dbg(dev, "DWC3-msm runtime suspend\n");
 	dbg_event(0xFF, "RT Sus", 0);
 
-	return dwc3_msm_suspend(mdwc);
+	return dwc3_msm_suspend(mdwc, false);
 }
 
 static int dwc3_msm_runtime_resume(struct device *dev)
@@ -4463,7 +4584,12 @@
 #endif
 
 static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+	.suspend	= dwc3_msm_pm_suspend,
+	.resume		= dwc3_msm_pm_resume,
+	.freeze		= dwc3_msm_pm_freeze,
+	.restore	= dwc3_msm_pm_restore,
+	.thaw		= dwc3_msm_pm_restore,
+	.poweroff	= dwc3_msm_pm_suspend,
 	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
 				dwc3_msm_runtime_idle)
 };
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 35b6351..f221cb4 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -598,9 +598,25 @@
 	return 0;
 }
 
+static void dwc3_omap_complete(struct device *dev)
+{
+	struct dwc3_omap	*omap = dev_get_drvdata(dev);
+
+	if (extcon_get_state(omap->edev, EXTCON_USB))
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+	else
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+	if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+	else
+		dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+}
+
 static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
 
 	SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
+	.complete = dwc3_omap_complete,
 };
 
 #define DEV_PM_OPS	(&dwc3_omap_dev_pm_ops)
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 902d36e..d9baac6 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -43,6 +43,8 @@
 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
 static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		struct dwc3_ep *dep, struct dwc3_request *req);
+static int dwc3_ep0_delegate_req(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl);
 
 static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
 {
@@ -358,12 +360,14 @@
 static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
 {
 }
+
 /*
  * ch 9.4.5
  */
 static int dwc3_ep0_handle_status(struct dwc3 *dwc,
 		struct usb_ctrlrequest *ctrl)
 {
+	int ret;
 	struct dwc3_ep		*dep;
 	u32			recip;
 	u32			reg;
@@ -397,7 +401,8 @@
 		 * Function Remote Wake Capable	D0
 		 * Function Remote Wakeup	D1
 		 */
-		break;
+		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+		return ret;
 
 	case USB_RECIP_ENDPOINT:
 		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
@@ -524,6 +529,9 @@
 			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
 				/* XXX enable remote wakeup */
 				;
+			ret = dwc3_ep0_delegate_req(dwc, ctrl);
+			if (ret)
+				return ret;
 			break;
 		default:
 			return -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f0e4d5e..f878b8d1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -414,7 +414,7 @@
 		dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
 		dev_err(dwc->dev, "%s command timeout for %s\n",
 			dwc3_gadget_ep_cmd_string(cmd), dep->name);
-		if (cmd != DWC3_DEPCMD_ENDTRANSFER) {
+		if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_ENDTRANSFER) {
 			dwc->ep_cmd_timeout_cnt++;
 			dwc3_notify_event(dwc,
 				DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index d3e0ca5..90cbb61 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -568,6 +568,7 @@
 config USB_CONFIGFS_F_GSI
 	bool "USB GSI function"
 	select USB_F_GSI
+	select USB_U_ETHER
 	depends on USB_CONFIGFS
 	help
 	  Generic function driver to support h/w acceleration to IPA over GSI.
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index d4c243c..42936f1 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -70,18 +70,11 @@
 	struct usb_phy *phy = udc->transceiver;
 
 	if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
-		u32 temp;
-
 		usb_phy_io_write(phy,
 				ULPI_MISC_A_VBUSVLDEXT |
 				ULPI_MISC_A_VBUSVLDEXTSEL,
 				ULPI_CLR(ULPI_MISC_A));
 
-		/* Notify LINK of VBUS LOW */
-		temp = readl_relaxed(USB_USBCMD);
-		temp &= ~USBCMD_SESS_VLD_CTRL;
-		writel_relaxed(temp, USB_USBCMD);
-
 		/*
 		 * Add memory barrier as it is must to complete
 		 * above USB PHY and Link register writes before
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 939c219..703fb24 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -3951,7 +3951,7 @@
 	usb_del_gadget_udc(&udc->gadget);
 remove_trans:
 	if (udc->transceiver)
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		otg_set_peripheral(udc->transceiver->otg, NULL);
 
 	err("error = %i", retval);
 put_transceiver:
@@ -3989,7 +3989,7 @@
 	usb_del_gadget_udc(&udc->gadget);
 
 	if (udc->transceiver) {
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		otg_set_peripheral(udc->transceiver->otg, NULL);
 		usb_put_phy(udc->transceiver);
 	}
 	destroy_eps(udc);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index ab2c623..3de95d5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1563,7 +1563,7 @@
 	return res;
 }
 
-static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
+static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
 {
 	int i, count;
 
@@ -1590,10 +1590,12 @@
 				buf += 23;
 			}
 			count += 24;
-			if (count >= 4096)
-				return;
+			if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
+				return count;
 		}
 	}
+
+	return count;
 }
 
 static int count_ext_prop(struct usb_configuration *c, int interface)
@@ -1638,25 +1640,20 @@
 	struct usb_os_desc *d;
 	struct usb_os_desc_ext_prop *ext_prop;
 	int j, count, n, ret;
-	u8 *start = buf;
 
 	f = c->interface[interface];
+	count = 10; /* header length */
 	for (j = 0; j < f->os_desc_n; ++j) {
 		if (interface != f->os_desc_table[j].if_id)
 			continue;
 		d = f->os_desc_table[j].os_desc;
 		if (d)
 			list_for_each_entry(ext_prop, &d->ext_prop, entry) {
-				/* 4kB minus header length */
-				n = buf - start;
-				if (n >= 4086)
-					return 0;
-
-				count = ext_prop->data_len +
+				n = ext_prop->data_len +
 					ext_prop->name_len + 14;
-				if (count > 4086 - n)
-					return -EINVAL;
-				usb_ext_prop_put_size(buf, count);
+				if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
+					return count;
+				usb_ext_prop_put_size(buf, n);
 				usb_ext_prop_put_type(buf, ext_prop->type);
 				ret = usb_ext_prop_put_name(buf, ext_prop->name,
 							    ext_prop->name_len);
@@ -1682,11 +1679,12 @@
 				default:
 					return -EINVAL;
 				}
-				buf += count;
+				buf += n;
+				count += n;
 			}
 	}
 
-	return 0;
+	return count;
 }
 
 /*
@@ -1980,6 +1978,7 @@
 			req->complete = composite_setup_complete;
 			buf = req->buf;
 			os_desc_cfg = cdev->os_desc_config;
+			w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
 			memset(buf, 0, w_length);
 			buf[5] = 0x01;
 			switch (ctrl->bRequestType & USB_RECIP_MASK) {
@@ -2003,8 +2002,8 @@
 					count += 16; /* header */
 					put_unaligned_le32(count, buf);
 					buf += 16;
-					fill_ext_compat(os_desc_cfg, buf);
-					value = w_length;
+					value = fill_ext_compat(os_desc_cfg, buf);
+					value = min_t(u16, w_length, value);
 				}
 				break;
 			case USB_RECIP_INTERFACE:
@@ -2033,8 +2032,7 @@
 							      interface, buf);
 					if (value < 0)
 						return value;
-
-					value = w_length;
+					value = min_t(u16, w_length, value);
 				}
 				break;
 			}
@@ -2336,8 +2334,8 @@
 		goto end;
 	}
 
-	/* OS feature descriptor length <= 4kB */
-	cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
+	cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
+					 GFP_KERNEL);
 	if (!cdev->os_desc_req->buf) {
 		ret = -ENOMEM;
 		usb_ep_free_request(ep0, cdev->os_desc_req);
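
The reworked fill_ext_compat()/fill_ext_prop() now return how many bytes they actually wrote, and the caller clamps the ep0 reply to both that count and the host's wLength instead of answering with wLength bytes from a fixed 4 kB buffer. A hedged sketch of that clamping shape; example_fill_descriptors() and the other example_* names are illustrative stand-ins.

static int example_fill_descriptors(u8 *buf, size_t bufsiz);	/* returns bytes written */

static int example_os_desc_reply(u8 *buf, size_t bufsiz, u16 w_length)
{
	int filled;

	/* never promise the host more than the reply buffer can hold */
	w_length = min_t(u16, w_length, bufsiz);
	memset(buf, 0, w_length);

	filled = example_fill_descriptors(buf, bufsiz);
	if (filled < 0)
		return filled;

	/* answer with the smaller of what was built and what was asked for */
	return min_t(u16, w_length, filled);
}
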
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index f779fdc30..14c18f3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1257,9 +1257,9 @@
 
 		cfg = container_of(c, struct config_usb_cfg, c);
 
-		list_for_each_entry_safe(f, tmp, &c->functions, list) {
+		list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
 
-			list_move_tail(&f->list, &cfg->func_list);
+			list_move(&f->list, &cfg->func_list);
 			if (f->unbind) {
 				dev_dbg(&gi->cdev.gadget->dev,
 					"unbind function '%s'/%pK\n",
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 4d4f039..a7166e2 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -1256,6 +1256,7 @@
 		ret = -EFAULT;
 	} else {
 		req->length = xfer_size;
+		req->zero = 1;
 		ret = usb_ep_queue(in, req, GFP_KERNEL);
 		if (ret) {
 			pr_err("EP QUEUE failed:%d\n", ret);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 9974332..b9c19d4 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3734,7 +3734,7 @@
 
 	ffs_log("exit");
 
-	return 0;
+	return USB_GADGET_DELAYED_STATUS;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
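
Returning USB_GADGET_DELAYED_STATUS from the setup path tells the composite layer to hold back the status stage until the function explicitly continues it with usb_composite_setup_continue(). f_fs completes it from its own request-handling path; the deferred-work shape below is only a hedged illustration of the pairing (example_* names are placeholders), not how f_fs itself structures it.

static struct usb_composite_dev *example_cdev;	/* assumed to be saved at bind time */

static void example_setup_work_fn(struct work_struct *work)
{
	/* ... finish handling the control request ... */

	/* release the status stage that was held back */
	usb_composite_setup_continue(example_cdev);
}
static DECLARE_WORK(example_setup_work, example_setup_work_fn);

static int example_setup(struct usb_function *f,
			 const struct usb_ctrlrequest *ctrl)
{
	/* defer the real work; the status stage stays pending meanwhile */
	schedule_work(&example_setup_work);

	return USB_GADGET_DELAYED_STATUS;
}
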
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 5ffbf12..7170dc9 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -14,12 +14,18 @@
 #include "f_gsi.h"
 #include "rndis.h"
 
+struct usb_gsi_debugfs {
+	struct dentry *debugfs_root;
+};
+
+static struct usb_gsi_debugfs debugfs;
+
 static bool qti_packet_debug;
 module_param(qti_packet_debug, bool, 0644);
 MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
 
 static struct workqueue_struct *ipa_usb_wq;
-static struct f_gsi *__gsi[IPA_USB_MAX_TETH_PROT_SIZE];
+static struct f_gsi *__gsi[USB_PROT_MAX];
 static void *ipc_log_ctxt;
 
 #define NUM_LOG_PAGES 15
@@ -56,6 +62,15 @@
 static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned int len, gfp_t flags);
 static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
 
+static inline bool is_ext_prot_ether(int prot_id)
+{
+	if (prot_id == USB_PROT_RMNET_ETHER ||
+				prot_id == USB_PROT_DPL_ETHER)
+		return true;
+
+	return false;
+}
+
 static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
 {
 	bool remote_wakeup_allowed;
@@ -196,6 +211,223 @@
 	return ret;
 }
 
+static void debugfs_rw_timer_func(unsigned long arg)
+{
+	struct f_gsi *gsi;
+
+	gsi = (struct f_gsi *)arg;
+
+	if (!atomic_read(&gsi->connected)) {
+		log_event_dbg("%s: gsi not connected..del timer\n", __func__);
+		gsi->debugfs_rw_enable = 0;
+		del_timer(&gsi->debugfs_rw_timer);
+		return;
+	}
+
+	log_event_dbg("%s: calling gsi_wakeup_host\n", __func__);
+	gsi_wakeup_host(gsi);
+
+	if (gsi->debugfs_rw_enable) {
+		log_event_dbg("%s: re-arm the timer\n", __func__);
+		mod_timer(&gsi->debugfs_rw_timer,
+			jiffies + msecs_to_jiffies(gsi->debugfs_rw_interval));
+	}
+}
+
+static struct f_gsi *get_connected_gsi(void)
+{
+	struct f_gsi *connected_gsi;
+	bool gsi_connected = false;
+	unsigned int i;
+
+	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+		connected_gsi = __gsi[i];
+		if (connected_gsi && atomic_read(&connected_gsi->connected)) {
+			gsi_connected = true;
+			break;
+		}
+	}
+
+	if (!gsi_connected)
+		connected_gsi = NULL;
+
+	return connected_gsi;
+}
+
+#define DEFAULT_RW_TIMER_INTERVAL 500 /* in ms */
+static ssize_t usb_gsi_rw_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct f_gsi *gsi;
+	u8 input;
+	int ret;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		goto err;
+	}
+
+	if (ubuf == NULL) {
+		log_event_dbg("%s: buffer is Null.\n", __func__);
+		goto err;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &input);
+	if (ret) {
+		log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+		goto err;
+	}
+
+	if (gsi->debugfs_rw_enable == !!input) {
+		if (!!input)
+			log_event_dbg("%s: RW already enabled\n", __func__);
+		else
+			log_event_dbg("%s: RW already disabled\n", __func__);
+		goto err;
+	}
+
+	gsi->debugfs_rw_enable = !!input;
+	if (gsi->debugfs_rw_enable) {
+		init_timer(&gsi->debugfs_rw_timer);
+		gsi->debugfs_rw_timer.data = (unsigned long) gsi;
+		gsi->debugfs_rw_timer.function = debugfs_rw_timer_func;
+
+		/* Use default remote wakeup timer interval if it is not set */
+		if (!gsi->debugfs_rw_interval)
+			gsi->debugfs_rw_interval = DEFAULT_RW_TIMER_INTERVAL;
+		gsi->debugfs_rw_timer.expires = jiffies +
+				msecs_to_jiffies(gsi->debugfs_rw_interval);
+		add_timer(&gsi->debugfs_rw_timer);
+		log_event_dbg("%s: timer initialized\n", __func__);
+	} else {
+		del_timer_sync(&gsi->debugfs_rw_timer);
+		log_event_dbg("%s: timer deleted\n", __func__);
+	}
+
+err:
+	return count;
+}
+
+static int usb_gsi_rw_show(struct seq_file *s, void *unused)
+{
+
+	struct f_gsi *gsi;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		return 0;
+	}
+
+	seq_printf(s, "%d\n", gsi->debugfs_rw_enable);
+
+	return 0;
+}
+
+static int usb_gsi_rw_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_gsi_rw_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw = {
+	.open = usb_gsi_rw_open,
+	.read = seq_read,
+	.write = usb_gsi_rw_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static ssize_t usb_gsi_rw_timer_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct f_gsi *gsi;
+	u16 timer_val;
+	int ret;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		goto err;
+	}
+
+	if (ubuf == NULL) {
+		log_event_dbg("%s: buffer is NULL.\n", __func__);
+		goto err;
+	}
+
+	ret = kstrtou16_from_user(ubuf, count, 0, &timer_val);
+	if (ret) {
+		log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+		goto err;
+	}
+
+	if (timer_val <= 0 || timer_val > 10000) {
+		log_event_err("%s: value must be between 1 and 10000\n", __func__);
+		goto err;
+	}
+
+	gsi->debugfs_rw_interval = timer_val;
+err:
+	return count;
+}
+
+static int usb_gsi_rw_timer_show(struct seq_file *s, void *unused)
+{
+	struct f_gsi *gsi;
+	unsigned int timer_interval;
+
+	gsi = get_connected_gsi();
+	if (!gsi) {
+		log_event_dbg("%s: gsi not connected\n", __func__);
+		return 0;
+	}
+
+	timer_interval = DEFAULT_RW_TIMER_INTERVAL;
+	if (gsi->debugfs_rw_interval)
+		timer_interval = gsi->debugfs_rw_interval;
+
+	seq_printf(s, "%ums\n", timer_interval);
+
+	return 0;
+}
+
+static int usb_gsi_rw_timer_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_gsi_rw_timer_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw_timer = {
+	.open = usb_gsi_rw_timer_open,
+	.read = seq_read,
+	.write = usb_gsi_rw_timer_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int usb_gsi_debugfs_init(void)
+{
+	debugfs.debugfs_root = debugfs_create_dir("usb_gsi", NULL);
+	if (!debugfs.debugfs_root)
+		return -ENOMEM;
+
+	debugfs_create_file("remote_wakeup_enable", 0600,
+					debugfs.debugfs_root,
+					__gsi, &fops_usb_gsi_rw);
+	debugfs_create_file("remote_wakeup_interval", 0600,
+					debugfs.debugfs_root,
+					__gsi,
+					&fops_usb_gsi_rw_timer);
+	return 0;
+}
+
+static void usb_gsi_debugfs_exit(void)
+{
+	debugfs_remove_recursive(debugfs.debugfs_root);
+}
+
 /*
  * Callback for when the network interface is up
  * and userspace is ready to answer DHCP requests, or remote wakeup
@@ -226,7 +458,7 @@
 		log_event_err("%s: Set net_ready_trigger", __func__);
 		gsi->d_port.net_ready_trigger = true;
 
-		if (gsi->prot_id == IPA_USB_ECM) {
+		if (gsi->prot_id == USB_PROT_ECM_IPA) {
 			cpkt_notify_connect = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
 			if (IS_ERR(cpkt_notify_connect)) {
 				spin_unlock_irqrestore(&gsi->d_port.lock,
@@ -260,7 +492,7 @@
 		 * Do not post EVT_CONNECTED for RNDIS.
 		 * Data path for RNDIS is enabled on EVT_HOST_READY.
 		 */
-		if (gsi->prot_id != IPA_USB_RNDIS) {
+		if (gsi->prot_id != USB_PROT_RNDIS_IPA) {
 			post_event(&gsi->d_port, EVT_CONNECTED);
 			queue_work(gsi->d_port.ipa_usb_wq,
 					&gsi->d_port.usb_ipa_w);
@@ -311,7 +543,7 @@
 
 	log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
 	in_params->client =
-		(gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+		(gsi->prot_id != USB_PROT_DIAG_IPA) ? IPA_CLIENT_USB_CONS :
 						IPA_CLIENT_USB_DPL_CONS;
 	in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
 	in_params->teth_prot = gsi->prot_id;
@@ -395,7 +627,7 @@
 	conn_params->usb_to_ipa_xferrscidx =
 			d_port->out_xfer_rsc_index;
 	conn_params->usb_to_ipa_xferrscidx_valid =
-			(gsi->prot_id != IPA_USB_DIAG) ? true : false;
+			(gsi->prot_id != USB_PROT_DIAG_IPA) ? true : false;
 	conn_params->ipa_to_usb_xferrscidx_valid = true;
 	conn_params->teth_prot = gsi->prot_id;
 	conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
@@ -440,7 +672,7 @@
 	d_port->in_request.db_reg_phs_addr_msb =
 		ipa_in_channel_out_params.db_reg_phs_addr_msb;
 
-	if (gsi->prot_id != IPA_USB_DIAG) {
+	if (gsi->prot_id != USB_PROT_DIAG_IPA) {
 		d_port->out_channel_handle =
 			ipa_out_channel_out_params.clnt_hdl;
 		d_port->out_request.db_reg_phs_addr_lsb =
@@ -690,6 +922,11 @@
 			log_event_dbg("%s: ST_CON_IN_PROG_EVT_HOST_READY",
 					 __func__);
 		} else if (event == EVT_CONNECTED) {
+			if (peek_event(d_port) == EVT_SUSPEND) {
+				log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUSPEND",
+					 __func__);
+				break;
+			}
 			ipa_data_path_enable(d_port);
 			d_port->sm_state = STATE_CONNECTED;
 			log_event_dbg("%s: ST_CON_IN_PROG_EVT_CON %d",
@@ -1159,7 +1396,8 @@
 
 	switch (cmd) {
 	case QTI_CTRL_MODEM_OFFLINE:
-		if (gsi->prot_id == IPA_USB_DIAG) {
+		if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) {
 			log_event_dbg("%s:Modem Offline not handled", __func__);
 			goto exit_ioctl;
 		}
@@ -1177,7 +1415,8 @@
 		gsi_ctrl_send_notification(gsi);
 		break;
 	case QTI_CTRL_MODEM_ONLINE:
-		if (gsi->prot_id == IPA_USB_DIAG) {
+		if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) {
 			log_event_dbg("%s:Modem Online not handled", __func__);
 			goto exit_ioctl;
 		}
@@ -1186,7 +1425,8 @@
 		break;
 	case QTI_CTRL_GET_LINE_STATE:
 		val = atomic_read(&gsi->connected);
-		if (gsi->prot_id == IPA_USB_RMNET)
+		if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER)
 			val = gsi->rmnet_dtr_status;
 
 		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
@@ -1207,32 +1447,43 @@
 			break;
 		}
 
-		if (gsi->prot_id == IPA_USB_DIAG &&
+		if ((gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) &&
 				(gsi->d_port.in_channel_handle == -EINVAL)) {
 			ret = -EAGAIN;
 			break;
 		}
 
-		if (gsi->d_port.in_channel_handle == -EINVAL &&
-			gsi->d_port.out_channel_handle == -EINVAL) {
-			ret = -EAGAIN;
-			break;
+		if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+			if (gsi->d_port.in_channel_handle == -EINVAL &&
+				gsi->d_port.out_channel_handle == -EINVAL) {
+				ret = -EAGAIN;
+				break;
+			}
+			info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+			info.ph_ep_info.peripheral_iface_id = gsi->data_id;
+		} else {
+			info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+			info.ph_ep_info.peripheral_iface_id = gsi->ctrl_id;
 		}
 
-		info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
-		info.ph_ep_info.peripheral_iface_id = gsi->data_id;
-		info.ipa_ep_pair.cons_pipe_num =
-		(gsi->prot_id == IPA_USB_DIAG) ? -1 :
-				gsi->d_port.out_channel_handle;
-		info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
-
 		log_event_dbg("%s: prot id :%d ep_type:%d intf:%d",
 				__func__, gsi->prot_id, info.ph_ep_info.ep_type,
 				info.ph_ep_info.peripheral_iface_id);
+		if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+			info.ipa_ep_pair.cons_pipe_num =
+			(gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER) ? -1 :
+					gsi->d_port.out_channel_handle;
+			info.ipa_ep_pair.prod_pipe_num =
+					gsi->d_port.in_channel_handle;
 
-		log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
-				__func__, info.ipa_ep_pair.cons_pipe_num,
-				info.ipa_ep_pair.prod_pipe_num);
+
+			log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
+					__func__,
+					info.ipa_ep_pair.cons_pipe_num,
+					info.ipa_ep_pair.prod_pipe_num);
+		}
 
 		ret = copy_to_user((void __user *)arg, &info,
 			sizeof(info));
@@ -1328,8 +1579,8 @@
 static int gsi_function_ctrl_port_init(struct f_gsi *gsi)
 {
 	int ret;
+	char *cdev_name = NULL;
 	int sz = GSI_CTRL_NAME_LEN;
-	bool ctrl_dev_create = true;
 
 	INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
 	INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
@@ -1338,17 +1589,33 @@
 
 	init_waitqueue_head(&gsi->c_port.read_wq);
 
-	if (gsi->prot_id == IPA_USB_RMNET)
-		strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
-	else if (gsi->prot_id == IPA_USB_MBIM)
-		strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
-	else if (gsi->prot_id == IPA_USB_DIAG)
-		strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
-	else
-		ctrl_dev_create = false;
+	switch (gsi->prot_id) {
+	case USB_PROT_RMNET_IPA:
+		cdev_name = GSI_RMNET_CTRL_NAME;
+		break;
+	case USB_PROT_RMNET_ETHER:
+		cdev_name = ETHER_RMNET_CTRL_NAME;
+		break;
+	case USB_PROT_MBIM_IPA:
+		cdev_name = GSI_MBIM_CTRL_NAME;
+		break;
+	case USB_PROT_DIAG_IPA:
+		cdev_name = GSI_DPL_CTRL_NAME;
+		break;
+	case USB_PROT_DPL_ETHER:
+		cdev_name = ETHER_DPL_CTRL_NAME;
+		break;
+	case USB_PROT_GPS_CTRL:
+		cdev_name = GSI_GPS_CTRL_NAME;
+		break;
+	default:
+		break;
+	}
 
-	if (!ctrl_dev_create)
+	if (!cdev_name)
 		return 0;
+	else
+		strlcat(gsi->c_port.name, cdev_name, sz);
 
 	gsi->c_port.ctrl_device.name = gsi->c_port.name;
 	gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
@@ -1419,6 +1686,12 @@
 	} else {
 		log_event_dbg("%s: posting HOST_READY\n", __func__);
 		post_event(d_port, EVT_HOST_READY);
+		/*
+		 * If the host supports flow control via RNDIS_MSG_INIT, set
+		 * this flag; it is checked later to enable flow control on
+		 * the resume path.
+		 */
+		gsi->host_supports_flow_control = true;
 	}
 
 	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -1512,7 +1785,7 @@
 		event->wValue = cpu_to_le16(0);
 		event->wLength = cpu_to_le16(0);
 
-		if (gsi->prot_id == IPA_USB_RNDIS) {
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 			data = req->buf;
 			data[0] = cpu_to_le32(1);
 			data[1] = cpu_to_le32(0);
@@ -1737,7 +2010,7 @@
 		/* read the request; process it later */
 		value = w_length;
 		req->context = gsi;
-		if (gsi->prot_id == IPA_USB_RNDIS)
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA)
 			req->complete = gsi_rndis_command_complete;
 		else
 			req->complete = gsi_ctrl_cmd_complete;
@@ -1749,7 +2022,7 @@
 		if (w_value || w_index != id)
 			goto invalid;
 
-		if (gsi->prot_id == IPA_USB_RNDIS) {
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 			/* return the result */
 			buf = rndis_get_next_response(gsi->params, &n);
 			if (buf) {
@@ -1785,7 +2058,8 @@
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
 		line_state = (w_value & GSI_CTRL_DTR ? true : false);
-		if (gsi->prot_id == IPA_USB_RMNET)
+		if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER)
 			gsi->rmnet_dtr_status = line_state;
 		log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
 						__func__, line_state);
@@ -1882,9 +2156,10 @@
 	struct f_gsi *gsi = func_to_gsi(f);
 
 	/* RNDIS, RMNET and DPL only support alt 0*/
-	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
-			gsi->prot_id == IPA_USB_RMNET ||
-			gsi->prot_id == IPA_USB_DIAG)
+	if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RNDIS_IPA ||
+			gsi->prot_id == USB_PROT_RMNET_IPA ||
+			gsi->prot_id == USB_PROT_DIAG_IPA ||
+			is_ext_prot_ether(gsi->prot_id))
 		return 0;
 	else if (intf == gsi->data_id)
 		return gsi->data_interface_up;
@@ -2003,7 +2278,8 @@
 	log_event_dbg("intf=%u, alt=%u", intf, alt);
 
 	/* Control interface has only altsetting 0 */
-	if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+	if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER) {
 		if (alt != 0)
 			goto fail;
 
@@ -2037,10 +2313,11 @@
 	if (intf == gsi->data_id) {
 		gsi->d_port.net_ready_trigger = false;
 		/* for rndis and rmnet alt is always 0 update alt accordingly */
-		if (gsi->prot_id == IPA_USB_RNDIS ||
-				gsi->prot_id == IPA_USB_RMNET ||
-				gsi->prot_id == IPA_USB_DIAG)
-				alt = 1;
+		if (gsi->prot_id == USB_PROT_RNDIS_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_DIAG_IPA ||
+				is_ext_prot_ether(gsi->prot_id))
+			alt = 1;
 
 		if (alt > 1)
 			goto notify_ep_disable;
@@ -2067,8 +2344,9 @@
 			}
 
 			/* Configure EPs for GSI */
-			if (gsi->d_port.in_ep) {
-				if (gsi->prot_id == IPA_USB_DIAG)
+			if (gsi->d_port.in_ep &&
+				gsi->prot_id <= USB_PROT_DIAG_IPA) {
+				if (gsi->prot_id == USB_PROT_DIAG_IPA)
 					gsi->d_port.in_ep->ep_intr_num = 3;
 				else
 					gsi->d_port.in_ep->ep_intr_num = 2;
@@ -2077,7 +2355,8 @@
 						GSI_EP_OP_CONFIG);
 			}
 
-			if (gsi->d_port.out_ep) {
+			if (gsi->d_port.out_ep &&
+				gsi->prot_id <= USB_PROT_DIAG_IPA) {
 				gsi->d_port.out_ep->ep_intr_num = 1;
 				usb_gsi_ep_op(gsi->d_port.out_ep,
 					&gsi->d_port.out_request,
@@ -2086,7 +2365,17 @@
 
 			gsi->d_port.gadget = cdev->gadget;
 
-			if (gsi->prot_id == IPA_USB_RNDIS) {
+			if (is_ext_prot_ether(gsi->prot_id)) {
+				net = gether_connect(&gsi->d_port.gether_port);
+				if (IS_ERR(net)) {
+					pr_err("%s:gether_connect err:%ld\n",
+					__func__, PTR_ERR(net));
+					goto notify_ep_disable;
+				}
+				gsi->d_port.gether_port.cdc_filter = 0;
+			}
+
+			if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 				gsi_rndis_open(gsi);
 				net = gsi_rndis_get_netdev("rndis0");
 				if (IS_ERR(net))
@@ -2098,7 +2387,7 @@
 						&gsi->d_port.cdc_filter);
 			}
 
-			if (gsi->prot_id == IPA_USB_ECM)
+			if (gsi->prot_id == USB_PROT_ECM_IPA)
 				gsi->d_port.cdc_filter = DEFAULT_FILTER;
 
 			/*
@@ -2106,7 +2395,7 @@
 			 * handler which is invoked when the host sends the
 			 * GEN_CURRENT_PACKET_FILTER message.
 			 */
-			if (gsi->prot_id != IPA_USB_RNDIS)
+			if (gsi->prot_id != USB_PROT_RNDIS_IPA)
 				post_event(&gsi->d_port,
 						EVT_CONNECT_IN_PROGRESS);
 			queue_work(gsi->d_port.ipa_usb_wq,
@@ -2127,7 +2416,8 @@
 	atomic_set(&gsi->connected, 1);
 
 	/* send 0 len pkt to qti to notify state change */
-	if (gsi->prot_id == IPA_USB_DIAG)
+	if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+				gsi->prot_id == USB_PROT_DPL_ETHER)
 		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
 
 	return 0;
@@ -2145,10 +2435,11 @@
 
 	atomic_set(&gsi->connected, 0);
 
-	if (gsi->prot_id == IPA_USB_RNDIS)
+	if (gsi->prot_id == USB_PROT_RNDIS_IPA)
 		rndis_uninit(gsi->params);
 
-	if (gsi->prot_id == IPA_USB_RMNET)
+	if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+				gsi->prot_id == USB_PROT_RMNET_ETHER)
 		gsi->rmnet_dtr_status = false;
 
 	/* Disable Control Path */
@@ -2170,7 +2461,15 @@
 
 	gsi->data_interface_up = false;
 
+	gsi->host_supports_flow_control = false;
+
 	log_event_dbg("%s deactivated", gsi->function.name);
+
+	if (is_ext_prot_ether(gsi->prot_id)) {
+		gether_disconnect(&gsi->d_port.gether_port);
+		return;
+	}
+
 	ipa_disconnect_handler(&gsi->d_port);
 	post_event(&gsi->d_port, EVT_DISCONNECTED);
 	queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -2181,12 +2480,25 @@
 	bool block_db;
 	struct f_gsi *gsi = func_to_gsi(f);
 
-	/* Check if function is already suspended in gsi_func_suspend() */
-	if (f->func_is_suspended) {
+	/* Check if the function is already suspended in gsi_func_suspend();
+	 * func_suspend would have bailed out earlier if func_remote_wakeup
+	 * wasn't enabled.
+	 */
+	if (f->func_is_suspended && (gsi->d_port.sm_state == STATE_SUSPENDED ||
+			gsi->d_port.sm_state == STATE_SUSPEND_IN_PROGRESS)) {
 		log_event_dbg("%s: func already suspended, return\n", __func__);
 		return;
 	}
 
+	/*
+	 * GPS doesn't use any data interface, hence bail out as there is no
+	 * GSI specific handling needed.
+	 */
+	if (gsi->prot_id == USB_PROT_GPS_CTRL) {
+		log_event_dbg("%s: suspend done\n", __func__);
+		return;
+	}
+
 	block_db = true;
 	usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db,
 			GSI_EP_OP_SET_CLR_BLOCK_DBL);
@@ -2216,13 +2528,20 @@
 	/* Check any pending cpkt, and queue immediately on resume */
 	gsi_ctrl_send_notification(gsi);
 
+	if (gsi->prot_id == USB_PROT_GPS_CTRL) {
+		log_event_dbg("%s: resume done\n", __func__);
+		return;
+	}
+
 	/*
 	 * Linux host does not send RNDIS_MSG_INIT or non-zero
 	 * RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
+	 * Check whether the host supports flow control or not; if it does,
 	 * trigger the state machine explicitly on resume.
 	 */
-	if (gsi->prot_id == IPA_USB_RNDIS &&
-			!usb_gsi_remote_wakeup_allowed(f))
+	if (gsi->prot_id == USB_PROT_RNDIS_IPA &&
+			!usb_gsi_remote_wakeup_allowed(f) &&
+			gsi->host_supports_flow_control)
 		rndis_flow_control(gsi->params, false);
 
 	post_event(&gsi->d_port, EVT_RESUMED);
@@ -2231,6 +2550,14 @@
 	log_event_dbg("%s: completed", __func__);
 }
 
+static int gsi_get_status(struct usb_function *f)
+{
+	unsigned int remote_wakeup_en_status = f->func_wakeup_allowed ? 1 : 0;
+
+	return (remote_wakeup_en_status << FUNC_WAKEUP_ENABLE_SHIFT) |
+		(1 << FUNC_WAKEUP_CAPABLE_SHIFT);
+}
+
 static int gsi_func_suspend(struct usb_function *f, u8 options)
 {
 	bool func_wakeup_allowed;
@@ -2327,7 +2654,7 @@
 		info->data_nop_desc->bInterfaceNumber = gsi->data_id;
 
 	/* allocate instance-specific endpoints */
-	if (info->fs_in_desc) {
+	if (info->fs_in_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
 		ep = usb_ep_autoconfig_by_name(cdev->gadget,
 				info->fs_in_desc, info->in_epname);
 		if (!ep)
@@ -2335,9 +2662,17 @@
 		gsi->d_port.in_ep = ep;
 		msm_ep_config(gsi->d_port.in_ep, NULL);
 		ep->driver_data = cdev;	/* claim */
+	} else {
+		if (info->fs_in_desc) {
+			ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+			if (!ep)
+				goto fail;
+			gsi->d_port.in_ep = ep;
+			ep->driver_data = cdev; /* claim */
+		}
 	}
 
-	if (info->fs_out_desc) {
+	if (info->fs_out_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
 		ep = usb_ep_autoconfig_by_name(cdev->gadget,
 				info->fs_out_desc, info->out_epname);
 		if (!ep)
@@ -2345,6 +2680,14 @@
 		gsi->d_port.out_ep = ep;
 		msm_ep_config(gsi->d_port.out_ep, NULL);
 		ep->driver_data = cdev;	/* claim */
+	} else {
+		if (info->fs_out_desc) {
+			ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+			if (!ep)
+				goto fail;
+			gsi->d_port.out_ep = ep;
+			ep->driver_data = cdev; /* claim */
+		}
 	}
 
 	if (info->fs_notify_desc) {
@@ -2475,14 +2818,17 @@
 	struct gsi_function_bind_info info = {0};
 	struct f_gsi *gsi = func_to_gsi(f);
 	struct rndis_params *params;
+	struct net_device *net;
+	char *name = NULL;
 	int status;
 	__u8  class;
 	__u8  subclass;
 	__u8  proto;
 
 
-	if (gsi->prot_id == IPA_USB_RMNET ||
-		gsi->prot_id == IPA_USB_DIAG)
+	if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+		gsi->prot_id == USB_PROT_DIAG_IPA ||
+		is_ext_prot_ether(gsi->prot_id))
 		gsi->ctrl_id = -ENODEV;
 	else {
 		status = gsi->ctrl_id = usb_interface_id(c, f);
@@ -2490,12 +2836,14 @@
 			goto fail;
 	}
 
-	status = gsi->data_id = usb_interface_id(c, f);
-	if (status < 0)
-		goto fail;
+	if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+		status = gsi->data_id = usb_interface_id(c, f);
+		if (status < 0)
+			goto fail;
+	}
 
 	switch (gsi->prot_id) {
-	case IPA_USB_RNDIS:
+	case USB_PROT_RNDIS_IPA:
 		info.string_defs = rndis_gsi_string_defs;
 		info.ctrl_desc = &rndis_gsi_control_intf;
 		info.ctrl_str_idx = 0;
@@ -2641,7 +2989,7 @@
 		info.ctrl_desc->bInterfaceProtocol = proto;
 
 		break;
-	case IPA_USB_MBIM:
+	case USB_PROT_MBIM_IPA:
 		info.string_defs = mbim_gsi_string_defs;
 		info.ctrl_desc = &mbim_gsi_control_intf;
 		info.ctrl_str_idx = 0;
@@ -2688,7 +3036,8 @@
 				c->bConfigurationValue + '0';
 		}
 		break;
-	case IPA_USB_RMNET:
+	case USB_PROT_RMNET_IPA:
+	case USB_PROT_RMNET_ETHER:
 		info.string_defs = rmnet_gsi_string_defs;
 		info.data_desc = &rmnet_gsi_interface_desc;
 		info.data_str_idx = 0;
@@ -2713,8 +3062,9 @@
 		info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
 		info.out_req_num_buf = GSI_NUM_OUT_BUFFERS;
 		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		name = "usb_rmnet";
 		break;
-	case IPA_USB_ECM:
+	case USB_PROT_ECM_IPA:
 		info.string_defs = ecm_gsi_string_defs;
 		info.ctrl_desc = &ecm_gsi_control_intf;
 		info.ctrl_str_idx = 0;
@@ -2763,7 +3113,8 @@
 		gsi->d_port.ipa_init_params.host_ethaddr[5]);
 		info.string_defs[1].s = gsi->ethaddr;
 		break;
-	case IPA_USB_DIAG:
+	case USB_PROT_DIAG_IPA:
+	case USB_PROT_DPL_ETHER:
 		info.string_defs = qdss_gsi_string_defs;
 		info.data_desc = &qdss_gsi_data_intf_desc;
 		info.data_str_idx = 0;
@@ -2778,6 +3129,19 @@
 		info.in_req_buf_len = 16384;
 		info.in_req_num_buf = GSI_NUM_IN_BUFFERS;
 		info.notify_buf_len = sizeof(struct usb_cdc_notification);
+		name = "dpl_usb";
+		break;
+	case USB_PROT_GPS_CTRL:
+		info.string_defs = gps_string_defs;
+		info.ctrl_str_idx = 0;
+		info.ctrl_desc = &gps_interface_desc;
+		info.fs_notify_desc = &gps_fs_notify_desc;
+		info.hs_notify_desc = &gps_hs_notify_desc;
+		info.ss_notify_desc = &gps_ss_notify_desc;
+		info.fs_desc_hdr = gps_fs_function;
+		info.hs_desc_hdr = gps_hs_function;
+		info.ss_desc_hdr = gps_ss_function;
+		info.notify_buf_len = sizeof(struct usb_cdc_notification);
 		break;
 	default:
 		log_event_err("%s: Invalid prot id %d", __func__,
@@ -2789,6 +3153,32 @@
 	if (status)
 		goto dereg_rndis;
 
+	if (gsi->prot_id == USB_PROT_GPS_CTRL)
+		goto skip_ipa_init;
+
+	if (is_ext_prot_ether(gsi->prot_id)) {
+		if (!name)
+			return -EINVAL;
+
+		gsi->d_port.gether_port.in_ep = gsi->d_port.in_ep;
+		gsi->d_port.gether_port.out_ep = gsi->d_port.out_ep;
+		net = gether_setup_name_default(name);
+		if (IS_ERR(net)) {
+			pr_err("%s: gether_setup failed\n", __func__);
+			return PTR_ERR(net);
+		}
+		gsi->d_port.gether_port.ioport = netdev_priv(net);
+		gether_set_gadget(net, c->cdev->gadget);
+		status = gether_register_netdev(net);
+		if (status < 0) {
+			pr_err("%s: gether_register_netdev failed\n",
+				__func__);
+			free_netdev(net);
+			return status;
+		}
+		goto skip_ipa_init;
+	}
+
 	status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
 	if (!status) {
 		log_event_info("%s: ipa is not ready", __func__);
@@ -2814,6 +3204,7 @@
 
 	gsi->d_port.sm_state = STATE_INITIALIZED;
 
+skip_ipa_init:
 	DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
 			f->name,
 			gadget_is_superspeed(c->cdev->gadget) ? "super" :
@@ -2836,6 +3227,12 @@
 {
 	struct f_gsi *gsi = func_to_gsi(f);
 
+	if (is_ext_prot_ether(gsi->prot_id)) {
+		gether_cleanup(gsi->d_port.gether_port.ioport);
+		gsi->d_port.gether_port.ioport = NULL;
+		goto skip_ipa_dinit;
+	}
+
 	/*
 	 * Use drain_workqueue to accomplish below conditions:
 	 * 1. Make sure that any running work completed
@@ -2847,12 +3244,13 @@
 	drain_workqueue(gsi->d_port.ipa_usb_wq);
 	ipa_usb_deinit_teth_prot(gsi->prot_id);
 
-	if (gsi->prot_id == IPA_USB_RNDIS) {
+skip_ipa_dinit:
+	if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
 		gsi->d_port.sm_state = STATE_UNINITIALIZED;
 		rndis_deregister(gsi->params);
 	}
 
-	if (gsi->prot_id == IPA_USB_MBIM)
+	if (gsi->prot_id == USB_PROT_MBIM_IPA)
 		mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
 
 	if (gadget_is_superspeed(c->cdev->gadget)) {
@@ -2883,33 +3281,38 @@
 static int gsi_bind_config(struct f_gsi *gsi)
 {
 	int status = 0;
-	enum ipa_usb_teth_prot prot_id = gsi->prot_id;
 
-	log_event_dbg("%s: prot id %d", __func__, prot_id);
+	log_event_dbg("%s: prot id %d", __func__, gsi->prot_id);
 
-	switch (prot_id) {
-	case IPA_USB_RNDIS:
+	switch (gsi->prot_id) {
+	case USB_PROT_RNDIS_IPA:
 		gsi->function.name = "rndis";
 		gsi->function.strings = rndis_gsi_strings;
 		break;
-	case IPA_USB_ECM:
+	case USB_PROT_ECM_IPA:
 		gsi->function.name = "cdc_ethernet";
 		gsi->function.strings = ecm_gsi_strings;
 		break;
-	case IPA_USB_RMNET:
+	case USB_PROT_RMNET_IPA:
+	case USB_PROT_RMNET_ETHER:
 		gsi->function.name = "rmnet";
 		gsi->function.strings = rmnet_gsi_strings;
 		break;
-	case IPA_USB_MBIM:
+	case USB_PROT_MBIM_IPA:
 		gsi->function.name = "mbim";
 		gsi->function.strings = mbim_gsi_strings;
 		break;
-	case IPA_USB_DIAG:
+	case USB_PROT_DIAG_IPA:
+	case USB_PROT_DPL_ETHER:
 		gsi->function.name = "dpl";
 		gsi->function.strings = qdss_gsi_strings;
 		break;
+	case USB_PROT_GPS_CTRL:
+		gsi->function.name = "gps";
+		gsi->function.strings = gps_strings;
+		break;
 	default:
-		log_event_err("%s: invalid prot id %d", __func__, prot_id);
+		log_event_err("%s: invalid prot id %d", __func__, gsi->prot_id);
 		return -EINVAL;
 	}
 
@@ -2922,6 +3325,7 @@
 	gsi->function.disable = gsi_disable;
 	gsi->function.free_func = gsi_free_func;
 	gsi->function.suspend = gsi_suspend;
+	gsi->function.get_status = gsi_get_status;
 	gsi->function.func_suspend = gsi_func_suspend;
 	gsi->function.resume = gsi_resume;
 
@@ -3164,7 +3568,7 @@
 static int gsi_set_inst_name(struct usb_function_instance *fi,
 	const char *name)
 {
-	int prot_id, name_len, ret = 0;
+	int name_len, prot_id, ret = 0;
 	struct gsi_opts *opts;
 	struct f_gsi *gsi;
 
@@ -3181,7 +3585,7 @@
 		return -EINVAL;
 	}
 
-	if (prot_id == IPA_USB_RNDIS)
+	if (prot_id == USB_PROT_RNDIS_IPA)
 		config_group_init_type_name(&opts->func_inst.group, "",
 					    &gsi_func_rndis_type);
 
@@ -3249,7 +3653,7 @@
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+	for (i = 0; i < USB_PROT_MAX; i++) {
 		__gsi[i] = gsi_function_init();
 		if (IS_ERR(__gsi[i]))
 			return PTR_ERR(__gsi[i]);
@@ -3259,6 +3663,7 @@
 	if (!ipc_log_ctxt)
 		pr_err("%s: Err allocating ipc_log_ctxt\n", __func__);
 
+	usb_gsi_debugfs_init();
 	return usb_function_register(&gsiusb_func);
 }
 module_init(fgsi_init);
@@ -3272,9 +3677,10 @@
 	if (ipc_log_ctxt)
 		ipc_log_context_destroy(ipc_log_ctxt);
 
-	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+	for (i = 0; i < USB_PROT_MAX; i++)
 		kfree(__gsi[i]);
 
+	usb_gsi_debugfs_exit();
 	usb_function_unregister(&gsiusb_func);
 }
 module_exit(fgsi_exit);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index c6e64fd..cd146a0 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -26,10 +26,17 @@
 #include <linux/debugfs.h>
 #include <linux/ipa_usb.h>
 #include <linux/ipc_logging.h>
+#include <linux/timer.h>
+
+#include "u_ether.h"
 
 #define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
 #define GSI_MBIM_CTRL_NAME "android_mbim"
 #define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define ETHER_RMNET_CTRL_NAME "rmnet_ctrl0"
+#define ETHER_DPL_CTRL_NAME "dpl_ctrl0"
+#define GSI_GPS_CTRL_NAME "gps"
+
 #define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
 #define GSI_MAX_CTRL_PKT_SIZE 4096
 #define GSI_CTRL_DTR (1 << 0)
@@ -114,6 +121,22 @@
 	RNDIS_ID_MAX,
 };
 
+enum usb_prot_id {
+	/* accelerated: redefined from ipa_usb.h, do not change order */
+	USB_PROT_RNDIS_IPA,
+	USB_PROT_ECM_IPA,
+	USB_PROT_RMNET_IPA,
+	USB_PROT_MBIM_IPA,
+	USB_PROT_DIAG_IPA,
+
+	/* non-accelerated */
+	USB_PROT_RMNET_ETHER,
+	USB_PROT_DPL_ETHER,
+	USB_PROT_GPS_CTRL,
+
+	USB_PROT_MAX,
+};
+
 #define MAXQUEUELEN 128
 struct event_queue {
 	u8 event[MAXQUEUELEN];
@@ -228,6 +251,7 @@
 	enum connection_state sm_state;
 	struct event_queue evt_q;
 	wait_queue_head_t wait_for_ipa_ready;
+	struct gether gether_port;
 
 	/* Track these for debugfs */
 	struct ipa_usb_xdci_chan_params ipa_in_channel_params;
@@ -237,7 +261,7 @@
 
 struct f_gsi {
 	struct usb_function function;
-	enum ipa_usb_teth_prot prot_id;
+	enum usb_prot_id prot_id;
 	int ctrl_id;
 	int data_id;
 	u32 vendorID;
@@ -254,6 +278,11 @@
 	struct gsi_data_port d_port;
 	struct gsi_ctrl_port c_port;
 	bool rmnet_dtr_status;
+	/* To test remote wakeup using debugfs */
+	struct timer_list debugfs_rw_timer;
+	u8 debugfs_rw_enable;
+	u16 debugfs_rw_interval;
+	bool host_supports_flow_control;
 };
 
 static inline struct f_gsi *func_to_gsi(struct usb_function *f)
@@ -285,21 +314,27 @@
 			    func_inst.group);
 }
 
-static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+static int name_to_prot_id(const char *name)
 {
 	if (!name)
 		goto error;
 
 	if (!strncasecmp(name, "rndis", MAX_INST_NAME_LEN))
-		return IPA_USB_RNDIS;
+		return USB_PROT_RNDIS_IPA;
 	if (!strncasecmp(name, "ecm", MAX_INST_NAME_LEN))
-		return IPA_USB_ECM;
+		return USB_PROT_ECM_IPA;
 	if (!strncasecmp(name, "rmnet", MAX_INST_NAME_LEN))
-		return IPA_USB_RMNET;
+		return USB_PROT_RMNET_IPA;
 	if (!strncasecmp(name, "mbim", MAX_INST_NAME_LEN))
-		return IPA_USB_MBIM;
+		return USB_PROT_MBIM_IPA;
 	if (!strncasecmp(name, "dpl", MAX_INST_NAME_LEN))
-		return IPA_USB_DIAG;
+		return USB_PROT_DIAG_IPA;
+	if (!strncasecmp(name, "rmnet.ether", MAX_INST_NAME_LEN))
+		return USB_PROT_RMNET_ETHER;
+	if (!strncasecmp(name, "dpl.ether", MAX_INST_NAME_LEN))
+		return USB_PROT_DPL_ETHER;
+	if (!strncasecmp(name, "gps", MAX_INST_NAME_LEN))
+		return USB_PROT_GPS_CTRL;
 
 error:
 	return -EINVAL;
@@ -309,6 +344,7 @@
 
 #define LOG2_STATUS_INTERVAL_MSEC 5
 #define MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+#define GPS_MAX_NOTIFY_SIZE 64
 
 /* rmnet device descriptors */
 
@@ -1399,4 +1435,91 @@
 	&qdss_gsi_string_table,
 	NULL,
 };
+
+/* gps device descriptor */
+static struct usb_interface_descriptor gps_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor gps_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_descriptor_header *gps_fs_function[] = {
+	(struct usb_descriptor_header *) &gps_interface_desc,
+	(struct usb_descriptor_header *) &gps_fs_notify_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor gps_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_descriptor_header *gps_hs_function[] = {
+	(struct usb_descriptor_header *) &gps_interface_desc,
+	(struct usb_descriptor_header *) &gps_hs_notify_desc,
+	NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor gps_ss_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor gps_ss_notify_comp_desc = {
+	.bLength =		sizeof(gps_ss_notify_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_descriptor_header *gps_ss_function[] = {
+	(struct usb_descriptor_header *) &gps_interface_desc,
+	(struct usb_descriptor_header *) &gps_ss_notify_desc,
+	(struct usb_descriptor_header *) &gps_ss_notify_comp_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string gps_string_defs[] = {
+	[0].s = "GPS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings gps_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		gps_string_defs,
+};
+
+static struct usb_gadget_strings *gps_strings[] = {
+	&gps_string_table,
+	NULL,
+};
 #endif
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 651776d..79ef286 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -137,6 +137,9 @@
 	} perf[MAX_ITERATION];
 	unsigned int dbg_read_index;
 	unsigned int dbg_write_index;
+	unsigned int mtp_rx_req_len;
+	unsigned int mtp_tx_req_len;
+	unsigned int mtp_tx_reqs;
 	struct mutex  read_mutex;
 };
 
@@ -531,16 +534,16 @@
 
 retry_tx_alloc:
 	/* now allocate requests for our endpoints */
-	for (i = 0; i < mtp_tx_reqs; i++) {
+	for (i = 0; i < dev->mtp_tx_reqs; i++) {
 		req = mtp_request_new(dev->ep_in,
-				mtp_tx_req_len + extra_buf_alloc);
+				dev->mtp_tx_req_len + extra_buf_alloc);
 		if (!req) {
-			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
+			if (dev->mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
 				goto fail;
 			while ((req = mtp_req_get(dev, &dev->tx_idle)))
 				mtp_request_free(req, dev->ep_in);
-			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
-			mtp_tx_reqs = MTP_TX_REQ_MAX;
+			dev->mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+			dev->mtp_tx_reqs = MTP_TX_REQ_MAX;
 			goto retry_tx_alloc;
 		}
 		req->complete = mtp_complete_in;
@@ -553,18 +556,18 @@
 	 * operational speed.  Hence assuming super speed max
 	 * packet size.
 	 */
-	if (mtp_rx_req_len % 1024)
-		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+	if (dev->mtp_rx_req_len % 1024)
+		dev->mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
 
 retry_rx_alloc:
 	for (i = 0; i < RX_REQ_MAX; i++) {
-		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
+		req = mtp_request_new(dev->ep_out, dev->mtp_rx_req_len);
 		if (!req) {
-			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
+			if (dev->mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
 				goto fail;
 			for (--i; i >= 0; i--)
 				mtp_request_free(dev->rx_req[i], dev->ep_out);
-			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+			dev->mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
 			goto retry_rx_alloc;
 		}
 		req->complete = mtp_complete_out;
@@ -609,11 +612,21 @@
 	}
 
 	len = ALIGN(count, dev->ep_out->maxpacket);
-	if (len > mtp_rx_req_len)
+	if (len > dev->mtp_rx_req_len)
 		return -EINVAL;
 
 	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+
 	if (dev->ep_out->desc) {
+		if (!cdev) {
+			spin_unlock_irq(&dev->lock);
+			return -ENODEV;
+		}
+
 		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
 		if (len > MTP_BULK_BUFFER_SIZE) {
 			spin_unlock_irq(&dev->lock);
@@ -750,8 +763,8 @@
 			break;
 		}
 
-		if (count > mtp_tx_req_len)
-			xfer = mtp_tx_req_len;
+		if (count > dev->mtp_tx_req_len)
+			xfer = dev->mtp_tx_req_len;
 		else
 			xfer = count;
 		if (xfer && copy_from_user(req->buf, buf, xfer)) {
@@ -847,8 +860,8 @@
 			break;
 		}
 
-		if (count > mtp_tx_req_len)
-			xfer = mtp_tx_req_len;
+		if (count > dev->mtp_tx_req_len)
+			xfer = dev->mtp_tx_req_len;
 		else
 			xfer = count;
 
@@ -944,7 +957,7 @@
 			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
 
 			/* some h/w expects size to be aligned to ep's MTU */
-			read_req->length = mtp_rx_req_len;
+			read_req->length = dev->mtp_rx_req_len;
 
 			dev->rx_done = 0;
 			mutex_unlock(&dev->read_mutex);
@@ -1421,6 +1434,9 @@
 		mtp_tx_req_len = 16384;
 	}
 
+	dev->mtp_rx_req_len = mtp_rx_req_len;
+	dev->mtp_tx_req_len = mtp_tx_req_len;
+	dev->mtp_tx_reqs = mtp_tx_reqs;
 	/* allocate interface ID(s) */
 	id = usb_interface_id(c, f);
 	if (id < 0)
@@ -1498,7 +1514,10 @@
 	while ((req = mtp_req_get(dev, &dev->intr_idle)))
 		mtp_request_free(req, dev->ep_intr);
 	mutex_unlock(&dev->read_mutex);
+	spin_lock_irq(&dev->lock);
 	dev->state = STATE_OFFLINE;
+	dev->cdev = NULL;
+	spin_unlock_irq(&dev->lock);
 	kfree(f->os_desc_table);
 	f->os_desc_n = 0;
 	fi_mtp->func_inst.f = NULL;
@@ -1554,7 +1573,9 @@
 	struct usb_composite_dev	*cdev = dev->cdev;
 
 	DBG(cdev, "mtp_function_disable\n");
+	spin_lock_irq(&dev->lock);
 	dev->state = STATE_OFFLINE;
+	spin_unlock_irq(&dev->lock);
 	usb_ep_disable(dev->ep_in);
 	usb_ep_disable(dev->ep_out);
 	usb_ep_disable(dev->ep_intr);
@@ -1581,7 +1602,7 @@
 		seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
 				dev->perf[i].vfs_wbytes,
 				dev->perf[i].vfs_wtime);
-		if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+		if (dev->perf[i].vfs_wbytes == dev->mtp_rx_req_len) {
 			sum += dev->perf[i].vfs_wtime;
 			if (min > dev->perf[i].vfs_wtime)
 				min = dev->perf[i].vfs_wtime;
@@ -1603,7 +1624,7 @@
 		seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
 				dev->perf[i].vfs_rbytes,
 				dev->perf[i].vfs_rtime);
-		if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+		if (dev->perf[i].vfs_rbytes == dev->mtp_tx_req_len) {
 			sum += dev->perf[i].vfs_rtime;
 			if (min > dev->perf[i].vfs_rtime)
 				min = dev->perf[i].vfs_rtime;
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 0ce2c407..312ae24 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -1205,7 +1205,7 @@
 	return &usb_qdss->port.function;
 }
 
-DECLARE_USB_FUNCTION_INIT(qdss, qdss_alloc_inst, qdss_alloc);
+DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc);
 static int __init usb_qdss_init(void)
 {
 	int ret;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 814b4a3e..24bc2d4 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -386,10 +386,31 @@
 					struct sk_buff *skb)
 {
 	struct sk_buff *skb2;
+	struct rndis_packet_msg_type *header = NULL;
+	struct f_rndis *rndis = func_to_rndis(&port->func);
 
 	if (!skb)
 		return NULL;
 
+	if (rndis->port.multi_pkt_xfer) {
+		if (port->header) {
+			header = port->header;
+			header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
+			header->MessageLength = cpu_to_le32(skb->len +
+							sizeof(*header));
+			header->DataOffset = cpu_to_le32(36);
+			header->DataLength = cpu_to_le32(skb->len);
+			pr_debug("MessageLength:%d DataLength:%d\n",
+						header->MessageLength,
+						header->DataLength);
+			return skb;
+		}
+
+		dev_kfree_skb(skb);
+		pr_err("RNDIS header is NULL.\n");
+		return NULL;
+	}
+
 	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
 	rndis_add_hdr(skb2);
 
@@ -818,6 +839,27 @@
 	rndis_data_intf.bInterfaceNumber = status;
 	rndis_union_desc.bSlaveInterface0 = status;
 
+	if (rndis_opts->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_control_intf.bInterfaceProtocol =	 0x03;
+	} else {
+		rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM;
+		rndis_iad_descriptor.bFunctionSubClass =
+						USB_CDC_SUBCLASS_ETHERNET;
+		rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE;
+		rndis_control_intf.bInterfaceClass = USB_CLASS_COMM;
+		rndis_control_intf.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM;
+		rndis_control_intf.bInterfaceProtocol =
+						USB_CDC_ACM_PROTO_VENDOR;
+	}
+
 	status = -ENODEV;
 
 	/* allocate instance-specific endpoints */
@@ -950,11 +992,15 @@
 /* f_rndis_opts_ifname */
 USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis);
 
+/* f_rndis_opts_wceis */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis);
+
 static struct configfs_attribute *rndis_attrs[] = {
 	&rndis_opts_attr_dev_addr,
 	&rndis_opts_attr_host_addr,
 	&rndis_opts_attr_qmult,
 	&rndis_opts_attr_ifname,
+	&rndis_opts_attr_wceis,
 	NULL,
 };
 
@@ -1008,6 +1054,9 @@
 	}
 	opts->rndis_interf_group = rndis_interf_group;
 
+	/* Enable "Wireless" RNDIS by default */
+	opts->wceis = true;
+
 	return &opts->func_inst;
 }
 
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index c330814..626e020 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1156,6 +1156,8 @@
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
 		return ret;
 	}
+	iad_desc.bFirstInterface = ret;
+
 	std_ac_if_desc.bInterfaceNumber = ret;
 	iad_desc.bFirstInterface = ret;
 	agdev->ac_intf = ret;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 9e38809..c75e790 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -246,6 +246,7 @@
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+static void tx_complete(struct usb_ep *ep, struct usb_request *req);
 
 static int
 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
@@ -307,7 +308,6 @@
 
 	req->buf = skb->data;
 	req->length = size;
-	req->complete = rx_complete;
 	req->context = skb;
 
 	retval = usb_ep_queue(out, req, gfp_flags);
@@ -410,6 +410,7 @@
 {
 	unsigned		i;
 	struct usb_request	*req;
+	bool			usb_in;
 
 	if (!n)
 		return -ENOMEM;
@@ -420,10 +421,22 @@
 		if (i-- == 0)
 			goto extra;
 	}
+
+	if (ep->desc->bEndpointAddress & USB_DIR_IN)
+		usb_in = true;
+	else
+		usb_in = false;
+
 	while (i--) {
 		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 		if (!req)
 			return list_empty(list) ? -ENOMEM : 0;
+		/* update completion handler */
+		if (usb_in)
+			req->complete = tx_complete;
+		else
+			req->complete = rx_complete;
+
 		list_add(&req->list, list);
 	}
 	return 0;
@@ -575,7 +588,7 @@
 
 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-	struct sk_buff	*skb = req->context;
+	struct sk_buff	*skb;
 	struct eth_dev	*dev = ep->driver_data;
 	struct net_device *net = dev->net;
 	struct usb_request *new_req;
@@ -683,6 +696,7 @@
 			spin_unlock(&dev->req_lock);
 		}
 	} else {
+		skb = req->context;
 		/* Is aggregation already enabled and buffers allocated ? */
 		if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
 			req->buf = kzalloc(dev->tx_req_bufsize
@@ -725,7 +739,7 @@
 	list_for_each(act, &dev->tx_reqs) {
 		req = container_of(act, struct usb_request, list);
 		if (!req->buf)
-			req->buf = kmalloc(dev->tx_req_bufsize
+			req->buf = kzalloc(dev->tx_req_bufsize
 				+ dev->gadget->extra_buf_alloc, GFP_ATOMIC);
 
 		if (!req->buf)
@@ -804,29 +818,6 @@
 	}
 
 	dev->tx_pkts_rcvd++;
-	/*
-	 * no buffer copies needed, unless the network stack did it
-	 * or the hardware can't use skb buffers.
-	 * or there's not enough space for extra headers we need
-	 */
-	spin_lock_irqsave(&dev->lock, flags);
-	if (dev->wrap && dev->port_usb)
-		skb = dev->wrap(dev->port_usb, skb);
-	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (!skb) {
-		if (dev->port_usb && dev->port_usb->supports_multi_frame) {
-			/*
-			 * Multi frame CDC protocols may store the frame for
-			 * later which is not a dropped frame.
-			 */
-		} else {
-			dev->net->stats.tx_dropped++;
-		}
-
-		/* no error code for dropped packets */
-		return NETDEV_TX_OK;
-	}
 
 	/* Allocate memory for tx_reqs to support multi packet transfer */
 	spin_lock_irqsave(&dev->req_lock, flags);
@@ -861,13 +852,45 @@
 		dev->tx_throttle++;
 		netif_stop_queue(net);
 	}
-
-	dev->tx_skb_hold_count++;
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 * or there's not enough space for extra headers we need
+	 */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->wrap) {
+		if (dev->port_usb)
+			skb = dev->wrap(dev->port_usb, skb);
+		if (!skb) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Multi frame CDC protocols may store the frame for
+			 * later which is not a dropped frame.
+			 */
+			if (dev->port_usb &&
+					dev->port_usb->supports_multi_frame)
+				goto multiframe;
+			goto drop;
+		}
+	}
+
+	dev->tx_skb_hold_count++;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
 	if (multi_pkt_xfer) {
+		pr_debug("req->length:%d header_len:%u\n"
+				"skb->len:%d skb->data_len:%d\n",
+				req->length, dev->header_len,
+				skb->len, skb->data_len);
+		/* Add RNDIS Header */
+		memcpy(req->buf + req->length, dev->port_usb->header,
+						dev->header_len);
+		/* Increment req length by header size */
+		req->length += dev->header_len;
+		/* Copy received IP data from SKB */
 		memcpy(req->buf + req->length, skb->data, skb->len);
-		req->length = req->length + skb->len;
+		/* Increment req length by skb data length */
+		req->length += skb->len;
 		length = req->length;
 		dev_kfree_skb_any(skb);
 
@@ -927,8 +950,6 @@
 		req->context = skb;
 	}
 
-	req->complete = tx_complete;
-
 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
 	if (is_fixed && length == fixed_in_len &&
 	    (length % in->maxpacket) == 0)
@@ -977,11 +998,13 @@
 			dev_kfree_skb_any(skb);
 		else
 			req->length = 0;
+drop:
 		dev->net->stats.tx_dropped++;
+multiframe:
 		spin_lock_irqsave(&dev->req_lock, flags);
 		if (list_empty(&dev->tx_reqs))
 			netif_start_queue(net);
-		list_add(&req->list, &dev->tx_reqs);
+		list_add_tail(&req->list, &dev->tx_reqs);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
 success:
@@ -1443,6 +1466,8 @@
 	else
 		INFO(dev, "MAC %pM\n", dev->dev_mac);
 
+	uether_debugfs_init(dev);
+
 	return status;
 }
 EXPORT_SYMBOL_GPL(gether_register_netdev);
@@ -1594,6 +1619,11 @@
 	if (!dev)
 		return ERR_PTR(-EINVAL);
 
+	/* size of rndis_packet_msg_type is 44 */
+	link->header = kzalloc(44, GFP_ATOMIC);
+	if (!link->header)
+		return ERR_PTR(-ENOMEM);
+
 	if (link->in_ep) {
 		link->in_ep->driver_data = dev;
 		result = usb_ep_enable(link->in_ep);
@@ -1657,8 +1687,12 @@
 	}
 fail0:
 	/* caller is responsible for cleanup on error */
-	if (result < 0)
+	if (result < 0) {
+		kfree(link->header);
+
 		return ERR_PTR(result);
+	}
+
 	return dev->net;
 }
 EXPORT_SYMBOL_GPL(gether_connect);
@@ -1711,6 +1745,9 @@
 			spin_lock(&dev->req_lock);
 		}
 		spin_unlock(&dev->req_lock);
+		/* Free rndis header buffer memory */
+		kfree(link->header);
+		link->header = NULL;
 		link->in_ep->desc = NULL;
 	}
 
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 3ff09d1..a10503a 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -87,6 +87,7 @@
 	/* called on network open/close */
 	void				(*open)(struct gether *);
 	void				(*close)(struct gether *);
+	struct rndis_packet_msg_type	*header;
 };
 
 #define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 0468459..1dd2ff4 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -188,4 +188,50 @@
 									\
 	CONFIGFS_ATTR_RO(_f_##_opts_, ifname)
 
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_)			\
+	static ssize_t _f_##_opts_wceis_show(struct config_item *item,	\
+					     char *page)		\
+	{								\
+		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
+		bool wceis;						\
+									\
+		if (opts->bound == false) {				\
+			pr_err("Gadget function is not bound yet.\n");	\
+			return -ENODEV;					\
+		}							\
+									\
+		mutex_lock(&opts->lock);				\
+		wceis = opts->wceis;					\
+		mutex_unlock(&opts->lock);				\
+		return snprintf(page, PAGE_SIZE, "%d", wceis);		\
+	}								\
+									\
+	static ssize_t _f_##_opts_wceis_store(struct config_item *item, \
+					      const char *page, size_t len)\
+	{								\
+		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
+		bool wceis;						\
+		int ret;						\
+									\
+		if (opts->bound == false) {				\
+			pr_err("Gadget function is not bound yet.\n");	\
+			return -ENODEV;					\
+		}							\
+									\
+		mutex_lock(&opts->lock);				\
+									\
+		ret = kstrtobool(page, &wceis);				\
+		if (ret)						\
+			goto out;					\
+									\
+		opts->wceis = wceis;					\
+		ret = len;						\
+out:									\
+		mutex_unlock(&opts->lock);				\
+									\
+		return ret;						\
+	}								\
+									\
+	CONFIGFS_ATTR(_f_##_opts_, wceis)
+
 #endif /* __U_ETHER_CONFIGFS_H */
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index 4e2ad04..f829a5e 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -38,6 +38,9 @@
 	 */
 	struct mutex			lock;
 	int				refcnt;
+
+	/* "Wireless" RNDIS; auto-detected by Windows */
+	bool	wceis;
 };
 
 void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net);
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index aac0ce8..8991a40 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1310,7 +1310,7 @@
 {
 	struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
 
-	if (ep->name)
+	if (ep->ep.name)
 		nuke(ep, -ESHUTDOWN);
 }
 
@@ -1698,7 +1698,7 @@
 		curr_ep = get_ep_by_pipe(udc, i);
 
 		/* If the ep is configured */
-		if (curr_ep->name == NULL) {
+		if (!curr_ep->ep.name) {
 			WARNING("Invalid EP?");
 			continue;
 		}
diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
index 86d2ada..64eb0f2 100644
--- a/drivers/usb/gadget/udc/goku_udc.h
+++ b/drivers/usb/gadget/udc/goku_udc.h
@@ -28,7 +28,7 @@
 #	define INT_EP1DATASET		0x00040
 #	define INT_EP2DATASET		0x00080
 #	define INT_EP3DATASET		0x00100
-#define INT_EPnNAK(n)		(0x00100 < (n))		/* 0 < n < 4 */
+#define INT_EPnNAK(n)		(0x00100 << (n))	/* 0 < n < 4 */
 #	define INT_EP1NAK		0x00200
 #	define INT_EP2NAK		0x00400
 #	define INT_EP3NAK		0x00800
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 2c39571..216ddee 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -446,7 +446,8 @@
 	struct usb_hcd *hcd = ohci_to_hcd(ohci);
 
 	/* Accept arbitrarily long scatter-gather lists */
-	hcd->self.sg_tablesize = ~0;
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
 
 	if (distrust_firmware)
 		ohci->flags |= OHCI_QUIRK_HUB_POWER;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 588e053..040feda 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -975,6 +975,8 @@
 	if (dev->out_ctx)
 		xhci_free_container_ctx(xhci, dev->out_ctx);
 
+	if (dev->udev && dev->udev->slot_id)
+		dev->udev->slot_id = 0;
 	kfree(xhci->devs[slot_id]);
 	xhci->devs[slot_id] = NULL;
 }
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1ce079d..75afa8f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -439,6 +439,39 @@
 	return -EBUSY;
 }
 
+static int xhci_plat_pm_freeze(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat freeze\n");
+
+	return xhci_suspend(xhci, false);
+}
+
+static int xhci_plat_pm_restore(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int ret;
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat restore\n");
+
+	ret = xhci_resume(xhci, true);
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_mark_last_busy(dev);
+
+	return ret;
+}
+
 static int xhci_plat_runtime_suspend(struct device *dev)
 {
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -470,7 +503,9 @@
 }
 
 static const struct dev_pm_ops xhci_plat_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL)
+	.freeze		= xhci_plat_pm_freeze,
+	.restore	= xhci_plat_pm_restore,
+	.thaw		= xhci_plat_pm_restore,
 	SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
 			   xhci_plat_runtime_idle)
 };
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fe2bbfb..9cba037 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -947,7 +947,7 @@
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
 
-	if (!hcd->state)
+	if (!hcd->state || xhci->suspended)
 		return 0;
 
 	if (hcd->state != HC_STATE_SUSPENDED ||
@@ -1017,6 +1017,7 @@
 	/* step 5: remove core well power */
 	/* synchronize irq when using MSI-X */
 	xhci_msix_sync_irqs(xhci);
+	xhci->suspended = true;
 
 	return rc;
 }
@@ -1036,7 +1037,7 @@
 	int			retval = 0;
 	bool			comp_timer_running = false;
 
-	if (!hcd->state)
+	if (!hcd->state || !xhci->suspended)
 		return 0;
 
 	/* Wait a bit if either of the roothubs need to settle from the
@@ -1173,6 +1174,7 @@
 
 	/* Re-enable port polling. */
 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+	xhci->suspended = false;
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4c1f556..a5153ca 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1697,6 +1697,7 @@
 	/* Compliance Mode Recovery Data */
 	struct timer_list	comp_mode_recovery_timer;
 	u32			port_status_u0;
+	bool			suspended;
 /* Compliance Mode Timer Triggered every 2 seconds */
 #define COMP_MODE_RCVRY_MSECS 2000
 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 2d9a806..579aa9a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1774,6 +1774,7 @@
 	int		vbus;
 	u8		devctl;
 
+	pm_runtime_get_sync(dev);
 	spin_lock_irqsave(&musb->lock, flags);
 	val = musb->a_wait_bcon;
 	vbus = musb_platform_get_vbus_status(musb);
@@ -1787,6 +1788,7 @@
 			vbus = 0;
 	}
 	spin_unlock_irqrestore(&musb->lock, flags);
+	pm_runtime_put_sync(dev);
 
 	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
 			vbus ? "on" : "off", val);
@@ -2483,10 +2485,11 @@
 	musb_generic_disable(musb);
 	spin_unlock_irqrestore(&musb->lock, flags);
 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_platform_exit(musb);
+
 	pm_runtime_dont_use_autosuspend(musb->controller);
 	pm_runtime_put_sync(musb->controller);
 	pm_runtime_disable(musb->controller);
-	musb_platform_exit(musb);
 	musb_phy_callback = NULL;
 	if (musb->dma_controller)
 		musb_dma_controller_destroy(musb->dma_controller);
@@ -2710,7 +2713,8 @@
 	if ((devctl & mask) != (musb->context.devctl & mask))
 		musb->port1_status = 0;
 
-	musb_start(musb);
+	musb_enable_interrupts(musb);
+	musb_platform_enable(musb);
 
 	spin_lock_irqsave(&musb->lock, flags);
 	error = musb_run_resume_work(musb);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c..f1219f6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -442,7 +442,6 @@
 	req = next_request(musb_ep);
 	request = &req->request;
 
-	trace_musb_req_tx(req);
 	csr = musb_readw(epio, MUSB_TXCSR);
 	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
 
@@ -481,6 +480,8 @@
 		u8	is_dma = 0;
 		bool	short_packet = false;
 
+		trace_musb_req_tx(req);
+
 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
 			is_dma = 1;
 			csr |= MUSB_TXCSR_P_WZC_BITS;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4303389..e2bc915 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1023,7 +1023,9 @@
 			/* set tx_reinit and schedule the next qh */
 			ep->tx_reinit = 1;
 		}
-		musb_start_urb(musb, is_in, next_qh);
+
+		if (next_qh)
+			musb_start_urb(musb, is_in, next_qh);
 	}
 }
 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index d21823b..6685f05 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -753,6 +753,7 @@
 {
 	int i;
 	union power_supply_propval val;
+	bool pps_found = false;
 	u32 first_pdo = pd->received_pdos[0];
 
 	if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) {
@@ -768,10 +769,8 @@
 	power_supply_set_property(pd->usb_psy,
 			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
 
-	if (pd->spec_rev == USBPD_REV_30 && !rev3_sink_only) {
-		bool pps_found = false;
-
-		/* downgrade to 2.0 if no PPS */
+	/* Check for PPS APDOs */
+	if (pd->spec_rev == USBPD_REV_30) {
 		for (i = 1; i < PD_MAX_DATA_OBJ; i++) {
 			if ((PD_SRC_PDO_TYPE(pd->received_pdos[i]) ==
 					PD_SRC_PDO_TYPE_AUGMENTED) &&
@@ -780,10 +779,18 @@
 				break;
 			}
 		}
-		if (!pps_found)
+
+		/* downgrade to 2.0 if no PPS */
+		if (!pps_found && !rev3_sink_only)
 			pd->spec_rev = USBPD_REV_20;
 	}
 
+	val.intval = pps_found ?
+			POWER_SUPPLY_PD_PPS_ACTIVE :
+			POWER_SUPPLY_PD_ACTIVE;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
 	/* Select the first PDO (vSafe5V) immediately. */
 	pd_select_pdo(pd, 1, 0, 0);
 
@@ -2140,7 +2147,7 @@
 				usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
 				usbpd_set_state(pd, PE_SRC_DISABLED);
 
-				val.intval = 0;
+				val.intval = POWER_SUPPLY_PD_INACTIVE;
 				power_supply_set_property(pd->usb_psy,
 						POWER_SUPPLY_PROP_PD_ACTIVE,
 						&val);
@@ -2160,7 +2167,7 @@
 		pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
 		kick_sm(pd, SENDER_RESPONSE_TIME);
 
-		val.intval = 1;
+		val.intval = POWER_SUPPLY_PD_ACTIVE;
 		power_supply_set_property(pd->usb_psy,
 				POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		break;
@@ -2352,10 +2359,6 @@
 			pd->src_cap_id++;
 
 			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
-
-			val.intval = 1;
-			power_supply_set_property(pd->usb_psy,
-					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		} else if (pd->hard_reset_count < 3) {
 			usbpd_set_state(pd, PE_SNK_HARD_RESET);
 		} else {
@@ -2366,7 +2369,7 @@
 					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
 					&val);
 
-			val.intval = 0;
+			val.intval = POWER_SUPPLY_PD_INACTIVE;
 			power_supply_set_property(pd->usb_psy,
 					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		}
@@ -3577,7 +3580,7 @@
 {
 	struct usbpd *pd = dev_get_drvdata(dev);
 	int pos = PD_RDO_OBJ_POS(pd->rdo);
-	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos]);
+	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos - 1]);
 	int len;
 
 	len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index e28173b..6dfca9c 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -71,7 +71,6 @@
 #define SQ_CTRL1_CHIRP_DISABLE		0x20
 #define SQ_CTRL2_CHIRP_DISABLE		0x80
 
-#define PORT_TUNE1_OVERRIDE_VAL		0xc5
 #define DEBUG_CTRL1_OVERRIDE_VAL	0x09
 
 /* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */
@@ -149,10 +148,12 @@
 	/* override TUNEX registers value */
 	struct dentry		*root;
 	u8			tune[5];
+	u8                      bias_ctrl2;
 
 	struct hrtimer		timer;
 	int			soc_min_rev;
 	bool			host_chirp_erratum;
+	bool			override_bias_ctrl2;
 };
 
 #ifdef CONFIG_NVMEM
@@ -472,6 +473,7 @@
 static void qusb_phy_host_init(struct usb_phy *phy)
 {
 	u8 reg;
+	int p_index;
 	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
 
 	dev_dbg(phy->dev, "%s\n", __func__);
@@ -479,21 +481,51 @@
 	qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
 			qphy->host_init_seq_len, 0);
 
-	/* If soc revision is mentioned and host_chirp_erratum flag is set
-	 * then override TUNE1 and DEBUG_CTRL1
-	 */
-	if (qphy->soc_min_rev && qphy->host_chirp_erratum) {
-		writel_relaxed(PORT_TUNE1_OVERRIDE_VAL,
-				qphy->base + qphy->phy_reg[PORT_TUNE1]);
-		writel_relaxed(DEBUG_CTRL1_OVERRIDE_VAL,
-				qphy->base + qphy->phy_reg[DEBUG_CTRL1]);
+	if (qphy->efuse_reg) {
+		if (!qphy->tune_val)
+			qusb_phy_get_tune1_param(qphy);
+	} else {
+		/* For non-fused chips we need to write the TUNE1 param as
+		 * specified in DT, otherwise we will end up writing 0
+		 * to TUNE1.
+		 */
+		qphy->tune_val = readb_relaxed(qphy->base +
+					qphy->phy_reg[PORT_TUNE1]);
 	}
 
-	if (qphy->refgen_north_bg_reg)
+	/* If soc revision is mentioned and host_chirp_erratum flag is set
+	 * then override TUNE1 and DEBUG_CTRL1 while honouring efuse values
+	 */
+	if (qphy->soc_min_rev && qphy->host_chirp_erratum) {
+		writel_relaxed(qphy->tune_val | BIT(7),
+			qphy->base + qphy->phy_reg[PORT_TUNE1]);
+		pr_debug("%s(): Programming TUNE1 parameter as:%x\n",
+			__func__, readb_relaxed(qphy->base +
+					qphy->phy_reg[PORT_TUNE1]));
+		writel_relaxed(DEBUG_CTRL1_OVERRIDE_VAL,
+			qphy->base + qphy->phy_reg[DEBUG_CTRL1]);
+	} else {
+		writel_relaxed(qphy->tune_val,
+			qphy->base + qphy->phy_reg[PORT_TUNE1]);
+	}
+
+	/* if debugfs based tunex params are set, use that value. */
+	for (p_index = 0; p_index < 5; p_index++) {
+		if (qphy->tune[p_index])
+			writel_relaxed(qphy->tune[p_index],
+				qphy->base + qphy->phy_reg[PORT_TUNE1] +
+							(4 * p_index));
+	}
+
+	if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2)
 		if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
 			writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
 				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
 
+	if (qphy->bias_ctrl2)
+		writel_relaxed(qphy->bias_ctrl2,
+				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
 	/* Ensure above write is completed before turning ON ref clk */
 	wmb();
 
@@ -580,11 +612,15 @@
 							(4 * p_index));
 	}
 
-	if (qphy->refgen_north_bg_reg)
+	if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2)
 		if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
 			writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
 				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
 
+	if (qphy->bias_ctrl2)
+		writel_relaxed(qphy->bias_ctrl2,
+				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
 	/* ensure above writes are completed before re-enabling PHY */
 	wmb();
 
@@ -946,11 +982,21 @@
 			dev_err(qphy->phy.dev,
 				"can't create debugfs entry for %s\n", name);
 			debugfs_remove_recursive(qphy->root);
-			ret = ENOMEM;
+			ret = -ENOMEM;
 			goto create_err;
 		}
 	}
 
+	file = debugfs_create_x8("bias_ctrl2", 0644, qphy->root,
+						&qphy->bias_ctrl2);
+	if (IS_ERR_OR_NULL(file)) {
+		dev_err(qphy->phy.dev,
+			"can't create debugfs entry for bias_ctrl2\n");
+		debugfs_remove_recursive(qphy->root);
+		ret = -ENOMEM;
+		goto create_err;
+	}
+
 create_err:
 	return ret;
 }
@@ -1190,6 +1236,9 @@
 	qphy->host_chirp_erratum = of_property_read_bool(dev->of_node,
 					"qcom,host-chirp-erratum");
 
+	qphy->override_bias_ctrl2 = of_property_read_bool(dev->of_node,
+					"qcom,override-bias-ctrl2");
+
 	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
 					 (u32 *) qphy->vdd_levels,
 					 ARRAY_SIZE(qphy->vdd_levels));
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 9c33c6e..47db12b 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -706,7 +706,9 @@
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
 			/* Disable PHY */
-			writel_relaxed(POWER_DOWN,
+			writel_relaxed(POWER_DOWN |
+				readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_POWERDOWN),
 				qphy->base + QUSB2PHY_PORT_POWERDOWN);
 			/* Make sure that above write is completed */
 			wmb();
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index ad3bbf7..1795d24 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -25,10 +25,10 @@
 #include <linux/clk.h>
 #include <linux/reset.h>
 
-enum core_ldo_levels {
-	CORE_LEVEL_NONE = 0,
-	CORE_LEVEL_MIN,
-	CORE_LEVEL_MAX,
+enum ldo_levels {
+	VOLTAGE_LEVEL_NONE = 0,
+	VOLTAGE_LEVEL_MIN,
+	VOLTAGE_LEVEL_MAX,
 };
 
 #define INIT_MAX_TIME_USEC			1000
@@ -38,6 +38,8 @@
 #define USB_SSPHY_1P2_VOL_MAX		1200000 /* uV */
 #define USB_SSPHY_HPM_LOAD		23000	/* uA */
 
+#define USB_SSPHY_LOAD_DEFAULT		-1
+
 /* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
 #define PHYSTATUS				BIT(6)
 
@@ -119,6 +121,9 @@
 	int			vdd_levels[3]; /* none, low, high */
 	struct regulator	*core_ldo;
 	int			core_voltage_levels[3];
+	struct regulator	*fpc_redrive_ldo;
+	int			redrive_voltage_levels[3];
+	int			redrive_load;
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*aux_clk;
@@ -226,6 +231,33 @@
 	}
 }
 
+static int msm_ldo_enable(struct msm_ssphy_qmp *phy,
+		struct regulator *ldo, int *voltage_levels, int load)
+{
+	int ret = 0;
+
+	dev_dbg(phy->phy.dev,
+		"ldo: min_vol:%duV max_vol:%duV\n",
+		voltage_levels[VOLTAGE_LEVEL_MIN],
+		voltage_levels[VOLTAGE_LEVEL_MAX]);
+
+	if (load > 0) {
+		ret = regulator_set_load(ldo, load);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = regulator_set_voltage(ldo,
+			voltage_levels[VOLTAGE_LEVEL_MIN],
+			voltage_levels[VOLTAGE_LEVEL_MAX]);
+	if (ret)
+		return ret;
+
+	ret = regulator_enable(ldo);
+
+	return ret;
+}
+
 static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
 {
 	int min, rc = 0;
@@ -245,74 +277,65 @@
 	if (!on)
 		goto disable_regulators;
 
-	rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
-				    phy->vdd_levels[2]);
-	if (rc) {
-		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
-		return rc;
+	if (phy->fpc_redrive_ldo) {
+		rc = msm_ldo_enable(phy, phy->fpc_redrive_ldo,
+				phy->redrive_voltage_levels,
+				phy->redrive_load);
+		if (rc < 0) {
+			dev_err(phy->phy.dev,
+				"enable phy->fpc_redrive_ldo failed\n");
+			return rc;
+		}
+
+		dev_dbg(phy->phy.dev,
+			"fpc redrive ldo: min_vol:%duV max_vol:%duV\n",
+			phy->redrive_voltage_levels[VOLTAGE_LEVEL_MIN],
+			phy->redrive_voltage_levels[VOLTAGE_LEVEL_MAX]);
 	}
 
-	dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
-		phy->vdd_levels[min], phy->vdd_levels[2]);
-
-	rc = regulator_enable(phy->vdd);
-	if (rc) {
-		dev_err(phy->phy.dev,
-			"regulator_enable(phy->vdd) failed, ret=%d",
-			rc);
-		goto unconfig_vdd;
-	}
-
-	rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
+	rc = msm_ldo_enable(phy, phy->vdd, phy->vdd_levels,
+			USB_SSPHY_LOAD_DEFAULT);
 	if (rc < 0) {
-		dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
+		dev_err(phy->phy.dev, "enable phy->vdd failed\n");
+		goto disable_fpc_redrive;
+	}
+
+	dev_dbg(phy->phy.dev,
+		"vdd ldo: min_vol:%duV max_vol:%duV\n",
+		phy->vdd_levels[VOLTAGE_LEVEL_MIN],
+		phy->vdd_levels[VOLTAGE_LEVEL_MAX]);
+
+	rc = msm_ldo_enable(phy, phy->core_ldo, phy->core_voltage_levels,
+			USB_SSPHY_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "enable phy->core_ldo failed\n");
 		goto disable_vdd;
 	}
 
-	rc = regulator_set_voltage(phy->core_ldo,
-			phy->core_voltage_levels[CORE_LEVEL_MIN],
-			phy->core_voltage_levels[CORE_LEVEL_MAX]);
-	if (rc) {
-		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
-		goto put_core_ldo_lpm;
-	}
-
-	rc = regulator_enable(phy->core_ldo);
-	if (rc) {
-		dev_err(phy->phy.dev, "Unable to enable core_ldo\n");
-		goto unset_core_ldo;
-	}
+	dev_dbg(phy->phy.dev,
+		"core ldo: min_vol:%duV max_vol:%duV\n",
+		phy->core_voltage_levels[VOLTAGE_LEVEL_MIN],
+		phy->core_voltage_levels[VOLTAGE_LEVEL_MAX]);
 
 	return 0;
 
 disable_regulators:
 	rc = regulator_disable(phy->core_ldo);
 	if (rc)
-		dev_err(phy->phy.dev, "Unable to disable core_ldo\n");
-
-unset_core_ldo:
-	rc = regulator_set_voltage(phy->core_ldo,
-			phy->core_voltage_levels[CORE_LEVEL_NONE],
-			phy->core_voltage_levels[CORE_LEVEL_MAX]);
-	if (rc)
-		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
-
-put_core_ldo_lpm:
-	rc = regulator_set_load(phy->core_ldo, 0);
-	if (rc < 0)
-		dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
+		dev_err(phy->phy.dev, "disable phy->core_ldo failed\n");
 
 disable_vdd:
 	rc = regulator_disable(phy->vdd);
 	if (rc)
-		dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
-			rc);
+		dev_err(phy->phy.dev, "disable phy->vdd failed\n");
 
-unconfig_vdd:
-	rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
-				    phy->vdd_levels[2]);
-	if (rc)
-		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+disable_fpc_redrive:
+	if (phy->fpc_redrive_ldo) {
+		rc = regulator_disable(phy->fpc_redrive_ldo);
+		if (rc)
+			dev_err(phy->phy.dev,
+				"disable phy->fpc_redrive_ldo failed\n");
+	}
 
 	return rc < 0 ? rc : 0;
 }
@@ -634,6 +657,9 @@
 	if (suspend) {
 		if (phy->cable_connected)
 			msm_ssusb_qmp_enable_autonomous(phy, 1);
+		else
+			writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
 
 		/* Make sure above write completed with PHY */
 		wmb();
@@ -944,9 +970,9 @@
 	}
 
 	/* Set default core voltage values */
-	phy->core_voltage_levels[CORE_LEVEL_NONE] = 0;
-	phy->core_voltage_levels[CORE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
-	phy->core_voltage_levels[CORE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_NONE] = 0;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+	phy->core_voltage_levels[VOLTAGE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
 
 	if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
 		len == sizeof(phy->core_voltage_levels)) {
@@ -990,6 +1016,39 @@
 		goto err;
 	}
 
+	phy->fpc_redrive_ldo = devm_regulator_get_optional(dev, "fpc-redrive");
+	if (IS_ERR(phy->fpc_redrive_ldo)) {
+		phy->fpc_redrive_ldo = NULL;
+		dev_dbg(dev, "no FPC re-drive ldo regulator\n");
+	} else {
+		if (of_get_property(dev->of_node,
+				"qcom,redrive-voltage-level", &len) &&
+				len == sizeof(phy->redrive_voltage_levels)) {
+			ret = of_property_read_u32_array(dev->of_node,
+					"qcom,redrive-voltage-level",
+					(u32 *) phy->redrive_voltage_levels,
+					len / sizeof(u32));
+			if (ret) {
+				dev_err(dev,
+					"err qcom,redrive-voltage-level\n");
+				goto err;
+			}
+		} else {
+			ret = -EINVAL;
+			dev_err(dev, "err inputs for redrive-voltage-level\n");
+			goto err;
+		}
+
+		ret = of_property_read_u32(dev->of_node, "qcom,redrive-load",
+				&phy->redrive_load);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to read redrive load\n");
+			goto err;
+		}
+
+		dev_dbg(dev, "Got FPC re-drive ldo regulator\n");
+	}
+
 	platform_set_drvdata(pdev, phy);
 
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 39086bf..b8f4907 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -213,6 +213,7 @@
 	bool enable_streaming;
 	bool enable_axi_prefetch;
 	bool vbus_low_as_hostmode;
+	bool phy_id_high_as_peripheral;
 };
 
 #define SDP_CHECK_DELAY_MS 10000 /* in ms */
@@ -257,11 +258,26 @@
 MODULE_PARM_DESC(lpm_disconnect_thresh,
 	"Delay before entering LPM on USB disconnect");
 
+static bool floated_charger_enable;
+module_param(floated_charger_enable, bool, 0644);
+MODULE_PARM_DESC(floated_charger_enable,
+	"Whether to enable floated charger");
+
 /* by default debugging is enabled */
 static unsigned int enable_dbg_log = 1;
 module_param(enable_dbg_log, uint, 0644);
 MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events");
 
+/* Max current to be drawn for DCP charger */
+static int dcp_max_current = IDEV_CHG_MAX;
+module_param(dcp_max_current, int, 0644);
+MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
+
+static bool chg_detection_for_float_charger;
+module_param(chg_detection_for_float_charger, bool, 0644);
+MODULE_PARM_DESC(chg_detection_for_float_charger,
+	"Whether to do PHY based charger detection for float chargers");
+
 static struct msm_otg *the_msm_otg;
 static bool debug_bus_voting_enabled;
 
@@ -1324,6 +1340,7 @@
 	struct msm_otg_platform_data *pdata = motg->pdata;
 	int cnt;
 	bool host_bus_suspend, device_bus_suspend, sm_work_busy;
+	bool host_pc_charger;
 	u32 cmd_val;
 	u32 portsc, config2;
 	u32 func_ctrl;
@@ -1351,6 +1368,9 @@
 	if (host_bus_suspend)
 		msm_otg_perf_vote_update(motg, false);
 
+	host_pc_charger = (motg->chg_type == USB_SDP_CHARGER) ||
+				(motg->chg_type == USB_CDP_CHARGER);
+
 	/* !BSV, but its handling is in progress by otg sm_work */
 	sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) &&
 			phy->otg->state == OTG_STATE_B_PERIPHERAL;
@@ -1377,8 +1397,8 @@
 	 * Don't abort suspend in case of dcp detected by PMIC
 	 */
 
-	if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend) ||
-							sm_work_busy) {
+	if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
+				host_pc_charger) || sm_work_busy) {
 		msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED",
 						motg->inputs, 0);
 		enable_irq(motg->irq);
@@ -1815,8 +1835,11 @@
 	union power_supply_propval pval = {0};
 
 	if (!psy) {
-		dev_err(motg->phy.dev, "no usb power supply registered\n");
-		return -ENODEV;
+		psy = power_supply_get_by_name("usb");
+		if (!psy) {
+			dev_err(motg->phy.dev, "Could not get usb power_supply\n");
+			return -ENODEV;
+		}
 	}
 
 	power_supply_get_property(psy, POWER_SUPPLY_PROP_REAL_TYPE, &pval);
@@ -1824,7 +1847,44 @@
 	return pval.intval;
 }
 
-static void msm_otg_notify_chg_current(struct msm_otg *motg, unsigned int mA)
+static int msm_otg_notify_chg_type(struct msm_otg *motg)
+{
+	static int charger_type;
+	union power_supply_propval propval;
+	int ret = 0;
+	/*
+	 * TODO
+	 * Unify OTG driver charger types and power supply charger types
+	 */
+	if (charger_type == motg->chg_type)
+		return 0;
+
+	if (motg->chg_type == USB_SDP_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB;
+	else if (motg->chg_type == USB_CDP_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_CDP;
+	else if (motg->chg_type == USB_DCP_CHARGER ||
+			motg->chg_type == USB_NONCOMPLIANT_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_DCP;
+	else if (motg->chg_type == USB_FLOATED_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_FLOAT;
+	else
+		charger_type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+	pr_debug("Trying to set usb power supply type %d\n", charger_type);
+
+	propval.intval = charger_type;
+	ret = power_supply_set_property(psy, POWER_SUPPLY_PROP_REAL_TYPE,
+								&propval);
+	if (ret)
+		dev_dbg(motg->phy.dev, "power supply error when setting property\n");
+
+	msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE",
+			motg->chg_type, charger_type);
+	return ret;
+}
+
+static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA)
 {
 	struct usb_gadget *g = motg->phy.otg->gadget;
 	union power_supply_propval pval = {0};
@@ -1833,6 +1893,16 @@
 	if (g && g->is_a_peripheral)
 		return;
 
+	dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA);
+
+	psy_type = get_psy_type(motg);
+	if (psy_type == -ENODEV)
+		return;
+
+	if (msm_otg_notify_chg_type(motg))
+		dev_dbg(motg->phy.dev, "Failed notifying %d charger type to PMIC\n",
+							motg->chg_type);
+
 	psy_type = get_psy_type(motg);
 	if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
 		if (!mA)
@@ -1842,9 +1912,7 @@
 		goto set_prop;
 	}
 
-	dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA);
-
-	if (motg->cur_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+	if (motg->cur_power == mA)
 		return;
 
 	dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
@@ -1863,15 +1931,12 @@
 	motg->cur_power = mA;
 }
 
-static void msm_otg_notify_chg_current_work(struct work_struct *w)
+static void msm_otg_notify_charger_work(struct work_struct *w)
 {
 	struct msm_otg *motg = container_of(w,
-				struct msm_otg, notify_chg_current_work);
-	/*
-	 * Gadget driver uses set_power method to notify about the
-	 * available current based on suspend/configured states.
-	 */
-	msm_otg_notify_chg_current(motg, motg->notify_current_mA);
+				struct msm_otg, notify_charger_work);
+
+	msm_otg_notify_charger(motg, motg->notify_current_mA);
 }
 
 static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA)
@@ -1879,7 +1944,14 @@
 	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
 
 	motg->notify_current_mA = mA;
-	schedule_work(&motg->notify_chg_current_work);
+	/*
+	 * Gadget driver uses set_power method to notify about the
+	 * available current based on suspend/configured states.
+	 */
+	if (motg->chg_type == USB_SDP_CHARGER ||
+	    get_psy_type(motg) == POWER_SUPPLY_TYPE_USB ||
+	    get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
+		queue_work(motg->otg_wq, &motg->notify_charger_work);
 
 	return 0;
 }
@@ -2280,6 +2352,301 @@
 		return true;
 }
 
+static bool msm_chg_check_secondary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 chg_det;
+
+	chg_det = ulpi_read(phy, 0x87);
+
+	return (chg_det & 1);
+}
+
+static void msm_chg_enable_secondary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	/*
+	 * Configure DM as current source, DP as current sink
+	 * and enable battery charging comparators.
+	 */
+	ulpi_write(phy, 0x8, 0x85);
+	ulpi_write(phy, 0x2, 0x85);
+	ulpi_write(phy, 0x1, 0x85);
+}
+
+static bool msm_chg_check_primary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 chg_det;
+	bool ret = false;
+
+	chg_det = ulpi_read(phy, 0x87);
+	ret = chg_det & 1;
+	/* Turn off VDP_SRC */
+	ulpi_write(phy, 0x3, 0x86);
+	msleep(20);
+
+	return ret;
+}
+
+static void msm_chg_enable_primary_det(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	/*
+	 * Configure DP as current source, DM as current sink
+	 * and enable battery charging comparators.
+	 */
+	ulpi_write(phy, 0x2, 0x85);
+	ulpi_write(phy, 0x1, 0x85);
+}
+
+static bool msm_chg_check_dcd(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 line_state;
+
+	line_state = ulpi_read(phy, 0x87);
+
+	return line_state & 2;
+}
+
+static void msm_chg_disable_dcd(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	ulpi_write(phy, 0x10, 0x86);
+	/*
+	 * Disable the Rdm_down after
+	 * the DCD is completed.
+	 */
+	ulpi_write(phy, 0x04, 0x0C);
+}
+
+static void msm_chg_enable_dcd(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+
+	/*
+	 * Idp_src and Rdm_down are de-coupled
+	 * on Femto PHY. If Idp_src alone is
+	 * enabled, DCD timeout is observed with
+	 * wall charger. But a genuine DCD timeout
+	 * may be incorrectly interpreted. Also
+	 * BC1.2 compliance testers expect Rdm_down
+	 * to be enabled during DCD. Enable Rdm_down
+	 * explicitly before enabling the DCD.
+	 */
+	ulpi_write(phy, 0x04, 0x0B);
+	ulpi_write(phy, 0x10, 0x85);
+}
+
+static void msm_chg_block_on(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 func_ctrl;
+
+	/* put the controller in non-driving mode */
+	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+
+	/* disable DP and DM pull down resistors */
+	ulpi_write(phy, 0x6, 0xC);
+	/* Clear charger detecting control bits */
+	ulpi_write(phy, 0x1F, 0x86);
+	/* Clear alt interrupt latch and enable bits */
+	ulpi_write(phy, 0x1F, 0x92);
+	ulpi_write(phy, 0x1F, 0x95);
+	udelay(100);
+}
+
+static void msm_chg_block_off(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	u32 func_ctrl;
+
+	/* Clear charger detecting control bits */
+	ulpi_write(phy, 0x3F, 0x86);
+	/* Clear alt interrupt latch and enable bits */
+	ulpi_write(phy, 0x1F, 0x92);
+	ulpi_write(phy, 0x1F, 0x95);
+	/* re-enable DP and DM pull down resistors */
+	ulpi_write(phy, 0x6, 0xB);
+
+	/* put the controller in normal mode */
+	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+}
+
+#define MSM_CHG_DCD_TIMEOUT		(750 * HZ/1000) /* 750 msec */
+#define MSM_CHG_DCD_POLL_TIME		(50 * HZ/1000) /* 50 msec */
+#define MSM_CHG_PRIMARY_DET_TIME	(50 * HZ/1000) /* TVDPSRC_ON */
+#define MSM_CHG_SECONDARY_DET_TIME	(50 * HZ/1000) /* TVDMSRC_ON */
+
+static void msm_chg_detect_work(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
+	struct usb_phy *phy = &motg->phy;
+	bool is_dcd = false, tmout, vout, queue_sm_work = false;
+	static bool dcd;
+	u32 line_state, dm_vlgc;
+	unsigned long delay = 0;
+
+	dev_dbg(phy->dev, "chg detection work\n");
+	msm_otg_dbg_log_event(phy, "CHG DETECTION WORK",
+			motg->chg_state, get_pm_runtime_counter(phy->dev));
+
+	switch (motg->chg_state) {
+	case USB_CHG_STATE_UNDEFINED:
+		pm_runtime_get_sync(phy->dev);
+		msm_chg_block_on(motg);
+		/* fall through */
+	case USB_CHG_STATE_IN_PROGRESS:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_chg_block_off(motg);
+			pm_runtime_put_sync(phy->dev);
+			return;
+		}
+
+		msm_chg_enable_dcd(motg);
+		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+		motg->dcd_time = 0;
+		delay = MSM_CHG_DCD_POLL_TIME;
+		break;
+	case USB_CHG_STATE_WAIT_FOR_DCD:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		is_dcd = msm_chg_check_dcd(motg);
+		motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
+		tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
+		if (is_dcd || tmout) {
+			if (is_dcd)
+				dcd = true;
+			else
+				dcd = false;
+			msm_chg_disable_dcd(motg);
+			msm_chg_enable_primary_det(motg);
+			delay = MSM_CHG_PRIMARY_DET_TIME;
+			motg->chg_state = USB_CHG_STATE_DCD_DONE;
+		} else {
+			delay = MSM_CHG_DCD_POLL_TIME;
+		}
+		break;
+	case USB_CHG_STATE_DCD_DONE:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		vout = msm_chg_check_primary_det(motg);
+		line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
+		dm_vlgc = line_state & PORTSC_LS_DM;
+		if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
+			if (line_state) { /* DP > VLGC */
+				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+				motg->chg_state = USB_CHG_STATE_DETECTED;
+			} else {
+				msm_chg_enable_secondary_det(motg);
+				delay = MSM_CHG_SECONDARY_DET_TIME;
+				motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+			}
+		} else { /* DM < VDAT_REF || DM > VLGC */
+			if (line_state) /* DP > VLGC or/and DM > VLGC */
+				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+			else if (!dcd && floated_charger_enable)
+				motg->chg_type = USB_FLOATED_CHARGER;
+			else
+				motg->chg_type = USB_SDP_CHARGER;
+
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+		}
+		break;
+	case USB_CHG_STATE_PRIMARY_DONE:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		vout = msm_chg_check_secondary_det(motg);
+		if (vout)
+			motg->chg_type = USB_DCP_CHARGER;
+		else
+			motg->chg_type = USB_CDP_CHARGER;
+		motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
+		/* fall through */
+	case USB_CHG_STATE_SECONDARY_DONE:
+		motg->chg_state = USB_CHG_STATE_DETECTED;
+		/* fall through */
+	case USB_CHG_STATE_DETECTED:
+		if (!motg->vbus_state) {
+			motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+			break;
+		}
+
+		msm_chg_block_off(motg);
+
+		/* Enable VDP_SRC in case of DCP charger */
+		if (motg->chg_type == USB_DCP_CHARGER) {
+			ulpi_write(phy, 0x2, 0x85);
+			msm_otg_notify_charger(motg, dcp_max_current);
+		} else if (motg->chg_type == USB_NONCOMPLIANT_CHARGER)
+			msm_otg_notify_charger(motg, dcp_max_current);
+		else if (motg->chg_type == USB_FLOATED_CHARGER ||
+					motg->chg_type == USB_CDP_CHARGER)
+			msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+
+		msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE",
+			motg->chg_type, get_pm_runtime_counter(phy->dev));
+		/* to match _get at the start of chg_det_work */
+		pm_runtime_mark_last_busy(phy->dev);
+		pm_runtime_put_autosuspend(phy->dev);
+		motg->chg_state = USB_CHG_STATE_QUEUE_SM_WORK;
+		break;
+	case USB_CHG_STATE_QUEUE_SM_WORK:
+		if (!motg->vbus_state) {
+			pm_runtime_get_sync(phy->dev);
+			/* Turn off VDP_SRC if charger is DCP type */
+			if (motg->chg_type == USB_DCP_CHARGER)
+				ulpi_write(phy, 0x2, 0x86);
+
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			if (motg->chg_type == USB_SDP_CHARGER ||
+			    motg->chg_type == USB_CDP_CHARGER)
+				queue_sm_work = true;
+
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_otg_notify_charger(motg, 0);
+			motg->cur_power = 0;
+			msm_chg_block_off(motg);
+			pm_runtime_mark_last_busy(phy->dev);
+			pm_runtime_put_autosuspend(phy->dev);
+			if (queue_sm_work)
+				queue_work(motg->otg_wq, &motg->sm_work);
+			else
+				return;
+		}
+
+		if (motg->chg_type == USB_CDP_CHARGER ||
+		    motg->chg_type == USB_SDP_CHARGER)
+			queue_work(motg->otg_wq, &motg->sm_work);
+
+		return;
+	default:
+		return;
+	}
+
+	msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay);
+	queue_delayed_work(motg->otg_wq, &motg->chg_work, delay);
+}
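
For reference (not part of the patch): the BC1.2 classification that msm_chg_detect_work spreads across the DCD_DONE and PRIMARY_DONE states can be reduced to one pure decision function. The sketch below is plain userspace C, with the comparator outputs and line states passed in as booleans, so the mapping from PHY readings to charger type can be checked in isolation; every name other than the charger-type outcomes is made up for the illustration.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the USB_*_CHARGER outcomes of the detection work above. */
enum chg_outcome { SDP, CDP, DCP, NONCOMPLIANT, FLOATED };

/*
 * Inputs mirror what the driver samples:
 *   dcd       - data contact detected before the 750 ms timeout
 *   prim_vout - primary detection comparator (DM above VDAT_REF)
 *   dp_high   - D+ above VLGC (PORTSC line state)
 *   dm_high   - D- above VLGC (PORTSC line state)
 *   sec_vout  - secondary detection comparator
 */
static enum chg_outcome classify(bool dcd, bool prim_vout, bool dp_high,
				 bool dm_high, bool sec_vout,
				 bool floated_charger_enable)
{
	if (prim_vout && !dm_high) {		/* VDAT_REF < DM < VLGC */
		if (dp_high)			/* DP > VLGC */
			return NONCOMPLIANT;
		return sec_vout ? DCP : CDP;	/* secondary det decides */
	}
	/* DM < VDAT_REF or DM > VLGC */
	if (dp_high || dm_high)
		return NONCOMPLIANT;
	if (!dcd && floated_charger_enable)
		return FLOATED;
	return SDP;
}

int main(void)
{
	static const char * const name[] = {
		"SDP", "CDP", "DCP", "NONCOMPLIANT", "FLOATED"
	};

	/* DCD ok, comparators low, lines idle -> standard downstream port */
	printf("%s\n", name[classify(true, false, false, false, false, true)]);
	/* DCD timeout, primary and secondary high -> dedicated charger */
	printf("%s\n", name[classify(false, true, false, false, true, true)]);
	return 0;
}
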
+
 /*
  * We support OTG, Peripheral only and Host only configurations. In case
  * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
@@ -2325,10 +2692,17 @@
 				else
 					clear_bit(ID, &motg->inputs);
 			} else if (motg->phy_irq) {
-				if (msm_otg_read_phy_id_state(motg))
+				if (msm_otg_read_phy_id_state(motg)) {
 					set_bit(ID, &motg->inputs);
-				else
+					if (pdata->phy_id_high_as_peripheral)
+						set_bit(B_SESS_VLD,
+								&motg->inputs);
+				} else {
 					clear_bit(ID, &motg->inputs);
+					if (pdata->phy_id_high_as_peripheral)
+						clear_bit(B_SESS_VLD,
+								&motg->inputs);
+				}
 			}
 		}
 		break;
@@ -2374,32 +2748,31 @@
 static void msm_otg_sm_work(struct work_struct *w)
 {
 	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
+	struct usb_phy *phy = &motg->phy;
 	struct usb_otg *otg = motg->phy.otg;
 	struct device *dev = otg->usb_phy->dev;
 	bool work = 0;
 	int ret;
 
 	pr_debug("%s work\n", usb_otg_state_string(otg->state));
-	msm_otg_dbg_log_event(&motg->phy, "SM WORK:",
-			otg->state, motg->inputs);
+	msm_otg_dbg_log_event(phy, "SM WORK:", otg->state, motg->inputs);
 
 	/* Just resume h/w if reqd, pm_count is handled based on state/inputs */
 	if (motg->resume_pending) {
-		pm_runtime_get_sync(otg->usb_phy->dev);
+		pm_runtime_get_sync(dev);
 		if (atomic_read(&motg->in_lpm)) {
 			dev_err(dev, "SM WORK: USB is in LPM\n");
-			msm_otg_dbg_log_event(&motg->phy,
-					"SM WORK: USB IS IN LPM",
+			msm_otg_dbg_log_event(phy, "SM WORK: USB IS IN LPM",
 					otg->state, motg->inputs);
 			msm_otg_resume(motg);
 		}
 		motg->resume_pending = false;
-		pm_runtime_put_noidle(otg->usb_phy->dev);
+		pm_runtime_put_noidle(dev);
 	}
 
 	switch (otg->state) {
 	case OTG_STATE_UNDEFINED:
-		pm_runtime_get_sync(otg->usb_phy->dev);
+		pm_runtime_get_sync(dev);
 		msm_otg_reset(otg->usb_phy);
 		/* Add child device only after block reset */
 		ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL,
@@ -2411,21 +2784,20 @@
 		otg->state = OTG_STATE_B_IDLE;
 		if (!test_bit(B_SESS_VLD, &motg->inputs) &&
 				test_bit(ID, &motg->inputs)) {
-			msm_otg_dbg_log_event(&motg->phy,
-				"PM RUNTIME: UNDEF PUT",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-			pm_runtime_put_sync(otg->usb_phy->dev);
+			msm_otg_dbg_log_event(phy, "PM RUNTIME: UNDEF PUT",
+				get_pm_runtime_counter(dev), 0);
+			pm_runtime_put_sync(dev);
 			break;
 		}
-		pm_runtime_put(otg->usb_phy->dev);
+		pm_runtime_put(dev);
 		/* FALL THROUGH */
 	case OTG_STATE_B_IDLE:
 		if (!test_bit(ID, &motg->inputs) && otg->host) {
 			pr_debug("!id\n");
-			msm_otg_dbg_log_event(&motg->phy, "!ID",
+			msm_otg_dbg_log_event(phy, "!ID",
 					motg->inputs, otg->state);
 			if (!otg->host) {
-				msm_otg_dbg_log_event(&motg->phy,
+				msm_otg_dbg_log_event(phy,
 					"SM WORK: Host Not Set",
 					otg->state, motg->inputs);
 				break;
@@ -2435,10 +2807,10 @@
 			otg->state = OTG_STATE_A_HOST;
 		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
 			pr_debug("b_sess_vld\n");
-			msm_otg_dbg_log_event(&motg->phy, "B_SESS_VLD",
+			msm_otg_dbg_log_event(phy, "B_SESS_VLD",
 					motg->inputs, otg->state);
 			if (!otg->gadget) {
-				msm_otg_dbg_log_event(&motg->phy,
+				msm_otg_dbg_log_event(phy,
 					"SM WORK: Gadget Not Set",
 					otg->state, motg->inputs);
 				break;
@@ -2446,25 +2818,24 @@
 
 			if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
 				queue_delayed_work(motg->otg_wq,
-					&motg->sdp_check,
-					msecs_to_jiffies(SDP_CHECK_DELAY_MS));
+				  &motg->sdp_check,
+				  msecs_to_jiffies(SDP_CHECK_DELAY_MS));
 
 			pm_runtime_get_sync(otg->usb_phy->dev);
 			msm_otg_start_peripheral(otg, 1);
 			otg->state = OTG_STATE_B_PERIPHERAL;
 		} else {
 			pr_debug("Cable disconnected\n");
-			msm_otg_dbg_log_event(&motg->phy, "RT: Cable DISC",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-
-			msm_otg_notify_chg_current(motg, 0);
+			msm_otg_dbg_log_event(phy, "RT: Cable DISC",
+				get_pm_runtime_counter(dev), 0);
+			msm_otg_notify_charger(motg, 0);
 		}
 		break;
 	case OTG_STATE_B_PERIPHERAL:
 		if (!test_bit(B_SESS_VLD, &motg->inputs)) {
 			cancel_delayed_work_sync(&motg->sdp_check);
 			msm_otg_start_peripheral(otg, 0);
-			msm_otg_dbg_log_event(&motg->phy, "RT PM: B_PERI A PUT",
+			msm_otg_dbg_log_event(phy, "RT PM: B_PERI A PUT",
 				get_pm_runtime_counter(dev), 0);
 			/* _put for _get done on cable connect in B_IDLE */
 			pm_runtime_mark_last_busy(dev);
@@ -2474,8 +2845,7 @@
 			work = 1;
 		} else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) {
 			pr_debug("a_bus_suspend\n");
-			msm_otg_dbg_log_event(&motg->phy,
-				"BUS_SUSPEND: PM RT PUT",
+			msm_otg_dbg_log_event(phy, "BUS_SUSPEND: PM RT PUT",
 				get_pm_runtime_counter(dev), 0);
 			otg->state = OTG_STATE_B_SUSPEND;
 			/* _get on connect in B_IDLE or host resume in B_SUSP */
@@ -2493,8 +2863,7 @@
 		} else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) {
 			pr_debug("!a_bus_suspend\n");
 			otg->state = OTG_STATE_B_PERIPHERAL;
-			msm_otg_dbg_log_event(&motg->phy,
-				"BUS_RESUME: PM RT GET",
+			msm_otg_dbg_log_event(phy, "BUS_RESUME: PM RT GET",
 				get_pm_runtime_counter(dev), 0);
 			pm_runtime_get_sync(dev);
 		}
@@ -2611,7 +2980,26 @@
 		else
 			set_bit(ID, &motg->inputs);
 	}
-	msm_otg_kick_sm_work(motg);
+
+	/*
+	 * Enable PHY based charger detection in 2 cases:
+	 * 1. PMI is not capable of doing charger detection and provides VBUS
+	 *    notification with the UNKNOWN psy type.
+	 * 2. Data lines have been cut off from PMI, in which case it provides
+	 *    VBUS notification with FLOAT psy type and we want to do PHY based
+	 *    charger detection by setting 'chg_detection_for_float_charger'.
+	 */
+	if (test_bit(B_SESS_VLD, &motg->inputs) && !motg->chg_detection) {
+		if ((get_psy_type(motg) == POWER_SUPPLY_TYPE_UNKNOWN) ||
+		    (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT &&
+		     chg_detection_for_float_charger))
+			motg->chg_detection = true;
+	}
+
+	if (motg->chg_detection)
+		queue_delayed_work(motg->otg_wq, &motg->chg_work, 0);
+	else
+		msm_otg_kick_sm_work(motg);
 }
 
 static void msm_id_status_w(struct work_struct *w)
@@ -2637,6 +3025,8 @@
 			gpio_direction_input(motg->pdata->switch_sel_gpio);
 		if (!test_and_set_bit(ID, &motg->inputs)) {
 			pr_debug("ID set\n");
+			if (motg->pdata->phy_id_high_as_peripheral)
+				set_bit(B_SESS_VLD, &motg->inputs);
 			msm_otg_dbg_log_event(&motg->phy, "ID SET",
 					motg->inputs, motg->phy.otg->state);
 			work = 1;
@@ -2646,6 +3036,8 @@
 			gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
 		if (test_and_clear_bit(ID, &motg->inputs)) {
 			pr_debug("ID clear\n");
+			if (motg->pdata->phy_id_high_as_peripheral)
+				clear_bit(B_SESS_VLD, &motg->inputs);
 			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
 					motg->inputs, motg->phy.otg->state);
 			work = 1;
@@ -3424,6 +3816,10 @@
 
 	pdata->vbus_low_as_hostmode = of_property_read_bool(node,
 					"qcom,vbus-low-as-hostmode");
+
+	pdata->phy_id_high_as_peripheral = of_property_read_bool(node,
+					"qcom,phy-id-high-as-peripheral");
+
 	return pdata;
 }
 
@@ -3865,11 +4261,11 @@
 	motg->id_state = USB_ID_FLOAT;
 	set_bit(ID, &motg->inputs);
 	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+	INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
 	INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w);
 	INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work);
 	INIT_DELAYED_WORK(&motg->sdp_check, check_for_sdp_connection);
-	INIT_WORK(&motg->notify_chg_current_work,
-			 msm_otg_notify_chg_current_work);
+	INIT_WORK(&motg->notify_charger_work, msm_otg_notify_charger_work);
 	motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
 	if (!motg->otg_wq) {
 		pr_err("%s: Unable to create workqueue otg_wq\n",
@@ -4079,12 +4475,8 @@
 	}
 
 	psy = power_supply_get_by_name("usb");
-	if (!psy) {
-		dev_dbg(&pdev->dev, "Could not get usb power_supply\n");
-		ret = -EPROBE_DEFER;
-		goto otg_remove_devices;
-	}
-
+	if (!psy)
+		dev_warn(&pdev->dev, "Could not get usb power_supply\n");
 
 	ret = msm_otg_extcon_register(motg);
 	if (ret)
@@ -4139,7 +4531,6 @@
 put_psy:
 	if (psy)
 		power_supply_put(psy);
-otg_remove_devices:
 	if (pdev->dev.of_node)
 		msm_otg_setup_devices(pdev, motg->pdata->mode, false);
 remove_cdev:
@@ -4220,12 +4611,13 @@
 	if (psy)
 		power_supply_put(psy);
 	msm_otg_debugfs_cleanup();
+	cancel_delayed_work_sync(&motg->chg_work);
 	cancel_delayed_work_sync(&motg->sdp_check);
 	cancel_delayed_work_sync(&motg->id_status_work);
 	cancel_delayed_work_sync(&motg->perf_vote_work);
 	msm_otg_perf_vote_update(motg, false);
 	cancel_work_sync(&motg->sm_work);
-	cancel_work_sync(&motg->notify_chg_current_work);
+	cancel_work_sync(&motg->notify_charger_work);
 	destroy_workqueue(motg->otg_wq);
 
 	pm_runtime_resume(&pdev->dev);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8c..77c3ebe 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@
 		- Fundamental Software dongle.
 		- Google USB serial devices
 		- HP4x calculators
+		- Libtransistor USB console
 		- a number of Motorola phones
 		- Motorola Tetra devices
 		- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index cab80ac..90d7c6e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -211,6 +211,7 @@
 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+	{ USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
 	{ } /* Terminating Entry */
 };
@@ -744,7 +745,7 @@
 	unsigned int cflag;
 	struct cp210x_flow_ctl flow_ctl;
 	u32 baud;
-	u16 bits;
+	u16 bits = 0;
 	u32 ctl_hs;
 
 	cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 71cbc68..2e2f7363 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1911,7 +1911,8 @@
 		return ftdi_jtag_probe(serial);
 
 	if (udev->product &&
-		(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+		(!strcmp(udev->product, "Arrow USB Blaster") ||
+		 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
 		 !strcmp(udev->product, "SNAP Connect E10")))
 		return ftdi_jtag_probe(serial);
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1799aa0..d982c45 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@
 /* These Quectel products use Qualcomm's vendor ID */
 #define QUECTEL_PRODUCT_UC20			0x9003
 #define QUECTEL_PRODUCT_UC15			0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M			0x90b2
 /* These Yuga products use Qualcomm's vendor ID */
 #define YUGA_PRODUCT_CLM920_NC5			0x9625
 
@@ -244,6 +246,7 @@
 #define QUECTEL_PRODUCT_EC21			0x0121
 #define QUECTEL_PRODUCT_EC25			0x0125
 #define QUECTEL_PRODUCT_BG96			0x0296
+#define QUECTEL_PRODUCT_EP06			0x0306
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -550,147 +553,15 @@
 #define WETELECOM_PRODUCT_6802			0x6802
 #define WETELECOM_PRODUCT_WMD300		0x6803
 
-struct option_blacklist_info {
-	/* bitmask of interface numbers blacklisted for send_setup */
-	const unsigned long sendsetup;
-	/* bitmask of interface numbers that are reserved */
-	const unsigned long reserved;
-};
 
-static const struct option_blacklist_info four_g_w14_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-};
+/* Device flags */
 
-static const struct option_blacklist_info four_g_w100_blacklist = {
-	.sendsetup = BIT(1) | BIT(2),
-	.reserved = BIT(3),
-};
+/* Interface does not support modem-control requests */
+#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)
 
-static const struct option_blacklist_info alcatel_x200_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-	.reserved = BIT(4),
-};
+/* Interface is reserved */
+#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)
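
A minimal, self-contained illustration (not from this patch) of how the packed flags above are intended to be used: RSVD() marks interface numbers the serial driver must leave alone (typically network interfaces), NCTRL() marks interfaces that should not receive modem-control (send_setup) requests, and both live in the low 16 bits of the unsigned long stored in .driver_info, replacing the per-device option_blacklist_info structs deleted below. The probe/attach changes that actually consume the flags are outside this hunk, so the helper names here are hypothetical.

#include <stdio.h>

#define BIT(n)		(1UL << (n))
/* Same encoding as the macros added above. */
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)
#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)

/* Hypothetical helpers showing how .driver_info would be decoded. */
static int is_reserved(unsigned long flags, int ifnum)
{
	return !!(flags & RSVD(ifnum));
}

static int no_modem_ctrl(unsigned long flags, int ifnum)
{
	return !!(flags & NCTRL(ifnum));
}

int main(void)
{
	/* e.g. the Telit ME910 entry below: NCTRL(0) | RSVD(1) | RSVD(3) */
	unsigned long flags = NCTRL(0) | RSVD(1) | RSVD(3);
	int i;

	for (i = 0; i < 8; i++)
		printf("if%d: reserved=%d no_ctrl=%d\n",
		       i, is_reserved(flags, i), no_modem_ctrl(flags, i));
	return 0;
}
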
 
-static const struct option_blacklist_info zte_0037_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info zte_k3765_z_blacklist = {
-	.sendsetup = BIT(0) | BIT(1) | BIT(2),
-	.reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
-	.sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
-	.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
-	.sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
-	.reserved = BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
-	.reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
-	.reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info huawei_cdc12_blacklist = {
-	.reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info net_intf0_blacklist = {
-	.reserved = BIT(0),
-};
-
-static const struct option_blacklist_info net_intf1_blacklist = {
-	.reserved = BIT(1),
-};
-
-static const struct option_blacklist_info net_intf2_blacklist = {
-	.reserved = BIT(2),
-};
-
-static const struct option_blacklist_info net_intf3_blacklist = {
-	.reserved = BIT(3),
-};
-
-static const struct option_blacklist_info net_intf4_blacklist = {
-	.reserved = BIT(4),
-};
-
-static const struct option_blacklist_info net_intf5_blacklist = {
-	.reserved = BIT(5),
-};
-
-static const struct option_blacklist_info net_intf6_blacklist = {
-	.reserved = BIT(6),
-};
-
-static const struct option_blacklist_info zte_mf626_blacklist = {
-	.sendsetup = BIT(0) | BIT(1),
-	.reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_1255_blacklist = {
-	.reserved = BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
-	.reserved = BIT(5) | BIT(6),
-};
-
-static const struct option_blacklist_info telit_me910_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(3),
-};
-
-static const struct option_blacklist_info telit_le910_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info telit_le920_blacklist = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(5),
-};
-
-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
-	.sendsetup = BIT(2),
-	.reserved = BIT(0) | BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
-	.sendsetup = BIT(0),
-	.reserved = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
-	.reserved = BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
-	.reserved = BIT(1) | BIT(4),
-};
 
 static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -724,26 +595,26 @@
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
 	{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	  .driver_info = RSVD(1) | RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	  .driver_info = RSVD(1) | RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff),	/* Huawei E1820 */
-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	  .driver_info = RSVD(1) | RSVD(2) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1188,65 +1059,70 @@
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
 	/* Quectel products using Qualcomm vendor ID */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	/* Yuga products use Qualcomm vendor ID */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
-	  .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+	  .driver_info = RSVD(1) | RSVD(4) },
+	/* u-blox products using Qualcomm vendor ID */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+	  .driver_info = RSVD(1) | RSVD(3) },
 	/* Quectel products using Quectel vendor ID */
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+	  .driver_info = RSVD(0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1254,38 +1130,38 @@
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
-		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
-		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
-		.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
-		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
-		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
-		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
-		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1301,58 +1177,58 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
-	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
+	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&zte_0037_blacklist },
+	  .driver_info = NCTRL(0) | NCTRL(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1377,26 +1253,26 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1412,50 +1288,50 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1572,23 +1448,23 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+	  .driver_info = RSVD(3) | RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1603,7 +1479,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1639,17 +1515,17 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1667,8 +1543,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
-	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
+	  .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
 
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1679,20 +1555,20 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+	  .driver_info = RSVD(1) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1844,19 +1720,19 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
-	 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+	 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
-	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+	 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
-	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+	 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
-	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
-	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+	 .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
-	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
-	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1876,37 +1752,34 @@
 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
-	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+	  .driver_info = RSVD(5) | RSVD(6) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
-	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
-	},
+	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
-	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
-	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
-	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	  .driver_info = RSVD(2) },
 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
-  	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
-  	},
+	  .driver_info = NCTRL(0) | NCTRL(1) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
-	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
-	},
+	  .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
 	{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
-	 .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+	 .driver_info = RSVD(3)},
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1932,14 +1805,14 @@
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
-		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1949,20 +1822,20 @@
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+	  .driver_info = RSVD(6) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2039,9 +1912,9 @@
 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),					/* TP-Link MA260 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) },	/* D-Link DWM-156 (variant) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) },	/* D-Link DWM-156 (variant) */
@@ -2052,9 +1925,9 @@
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) },			/* D-Link DWM-157 C1 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2114,7 +1987,7 @@
 	struct usb_interface_descriptor *iface_desc =
 				&serial->interface->cur_altsetting->desc;
 	struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
-	const struct option_blacklist_info *blacklist;
+	unsigned long device_flags = id->driver_info;
 
 	/* Never bind to the CD-Rom emulation interface	*/
 	if (iface_desc->bInterfaceClass == 0x08)
@@ -2125,9 +1998,7 @@
 	 * the same class/subclass/protocol as the serial interfaces.  Look at
 	 * the Windows driver .INF files for reserved interface numbers.
 	 */
-	blacklist = (void *)id->driver_info;
-	if (blacklist && test_bit(iface_desc->bInterfaceNumber,
-						&blacklist->reserved))
+	if (device_flags & RSVD(iface_desc->bInterfaceNumber))
 		return -ENODEV;
 	/*
 	 * Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2138,8 +2009,8 @@
 	    iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
 		return -ENODEV;
 
-	/* Store the blacklist info so we can use it during attach. */
-	usb_set_serial_data(serial, (void *)blacklist);
+	/* Store the device flags so we can use them during attach. */
+	usb_set_serial_data(serial, (void *)device_flags);
 
 	return 0;
 }
@@ -2147,22 +2018,21 @@
 static int option_attach(struct usb_serial *serial)
 {
 	struct usb_interface_descriptor *iface_desc;
-	const struct option_blacklist_info *blacklist;
 	struct usb_wwan_intf_private *data;
+	unsigned long device_flags;
 
 	data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	/* Retrieve blacklist info stored at probe. */
-	blacklist = usb_get_serial_data(serial);
+	/* Retrieve device flags stored at probe. */
+	device_flags = (unsigned long)usb_get_serial_data(serial);
 
 	iface_desc = &serial->interface->cur_altsetting->desc;
 
-	if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
-						&blacklist->sendsetup)) {
+	if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
 		data->use_send_setup = 1;
-	}
+
 	spin_lock_init(&data->susp_lock);
 
 	usb_set_serial_data(serial, data);
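
For reference, the option.c conversion above replaces the per-device blacklist structs with interface-number bitmasks carried directly in driver_info. A minimal sketch of that encoding and the probe-time check, assuming the usual one-bit-per-interface layout (the exact field positions are an assumption here, not something this diff shows):

#include <linux/bitops.h>
#include <linux/types.h>

/* Assumed layout: low byte = reserved interfaces, next byte = interfaces
 * that must not get the usb_wwan send_setup call.
 */
#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)

static bool option_iface_is_reserved(unsigned long device_flags, u8 ifnum)
{
	/* mirrors the probe check: refuse to bind reserved interfaces */
	return device_flags & RSVD(ifnum);
}

The same flags word is stashed with usb_set_serial_data() at probe time and read back in attach, which is why the blacklist structs and their test_bit() lookups can be dropped.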
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2..2674da4 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@
 					0x01) }
 DEVICE(google, GOOGLE_IDS);
 
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS()			\
+	{ USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
 /* ViVOpay USB Serial Driver */
 #define VIVOPAY_IDS()			\
 	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
@@ -113,6 +118,7 @@
 	&funsoft_device,
 	&flashloader_device,
 	&google_device,
+	&libtransistor_device,
 	&vivopay_device,
 	&moto_modem_device,
 	&motorola_tetra_device,
@@ -129,6 +135,7 @@
 	FUNSOFT_IDS(),
 	FLASHLOADER_IDS(),
 	GOOGLE_IDS(),
+	LIBTRANSISTOR_IDS(),
 	VIVOPAY_IDS(),
 	MOTO_IDS(),
 	MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be..dbc3801 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@
 		goto exit;
 	}
 
-	if (retval == sizeof(*connection_info)) {
-			connection_info = (struct visor_connection_info *)
-							transfer_buffer;
-
-		num_ports = le16_to_cpu(connection_info->num_ports);
-		for (i = 0; i < num_ports; ++i) {
-			switch (
-			   connection_info->connections[i].port_function_id) {
-			case VISOR_FUNCTION_GENERIC:
-				string = "Generic";
-				break;
-			case VISOR_FUNCTION_DEBUGGER:
-				string = "Debugger";
-				break;
-			case VISOR_FUNCTION_HOTSYNC:
-				string = "HotSync";
-				break;
-			case VISOR_FUNCTION_CONSOLE:
-				string = "Console";
-				break;
-			case VISOR_FUNCTION_REMOTE_FILE_SYS:
-				string = "Remote File System";
-				break;
-			default:
-				string = "unknown";
-				break;
-			}
-			dev_info(dev, "%s: port %d, is for %s use\n",
-				serial->type->description,
-				connection_info->connections[i].port, string);
-		}
+	if (retval != sizeof(*connection_info)) {
+		dev_err(dev, "Invalid connection information received from device\n");
+		retval = -ENODEV;
+		goto exit;
 	}
-	/*
-	* Handle devices that report invalid stuff here.
-	*/
+
+	connection_info = (struct visor_connection_info *)transfer_buffer;
+
+	num_ports = le16_to_cpu(connection_info->num_ports);
+
+	/* Handle devices that report invalid stuff here. */
 	if (num_ports == 0 || num_ports > 2) {
 		dev_warn(dev, "%s: No valid connect info available\n",
 			serial->type->description);
 		num_ports = 2;
 	}
 
+	for (i = 0; i < num_ports; ++i) {
+		switch (connection_info->connections[i].port_function_id) {
+		case VISOR_FUNCTION_GENERIC:
+			string = "Generic";
+			break;
+		case VISOR_FUNCTION_DEBUGGER:
+			string = "Debugger";
+			break;
+		case VISOR_FUNCTION_HOTSYNC:
+			string = "HotSync";
+			break;
+		case VISOR_FUNCTION_CONSOLE:
+			string = "Console";
+			break;
+		case VISOR_FUNCTION_REMOTE_FILE_SYS:
+			string = "Remote File System";
+			break;
+		default:
+			string = "unknown";
+			break;
+		}
+		dev_info(dev, "%s: port %d, is for %s use\n",
+			serial->type->description,
+			connection_info->connections[i].port, string);
+	}
 	dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
 		num_ports);
 
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 910f027..84c0599 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -87,6 +87,7 @@
 	struct stub_device *sdev;
 	struct usb_device *udev;
 	char shutdown_busid;
+	spinlock_t busid_lock;
 };
 
 /* stub_priv is allocated from stub_priv_cache */
@@ -97,6 +98,7 @@
 
 /* stub_main.c */
 struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
 int del_match_busid(char *busid);
 void stub_device_cleanup_urbs(struct stub_device *sdev);
 
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 3550224..8e629b6 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -314,9 +314,9 @@
 	struct stub_device *sdev = NULL;
 	const char *udev_busid = dev_name(&udev->dev);
 	struct bus_id_priv *busid_priv;
-	int rc;
+	int rc = 0;
 
-	dev_dbg(&udev->dev, "Enter\n");
+	dev_dbg(&udev->dev, "Enter probe\n");
 
 	/* check we should claim or not by busid_table */
 	busid_priv = get_busid_priv(udev_busid);
@@ -331,13 +331,15 @@
 		 * other matched drivers by the driver core.
 		 * See driver_probe_device() in driver/base/dd.c
 		 */
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
 		dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
 			 udev_busid);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -345,13 +347,16 @@
 			"%s is attached on vhci_hcd... skip!\n",
 			udev_busid);
 
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	/* ok, this is my device */
 	sdev = stub_device_alloc(udev);
-	if (!sdev)
-		return -ENOMEM;
+	if (!sdev) {
+		rc = -ENOMEM;
+		goto call_put_busid_priv;
+	}
 
 	dev_info(&udev->dev,
 		"usbip-host: register new device (bus %u dev %u)\n",
@@ -383,7 +388,9 @@
 	}
 	busid_priv->status = STUB_BUSID_ALLOC;
 
-	return 0;
+	rc = 0;
+	goto call_put_busid_priv;
+
 err_files:
 	usb_hub_release_port(udev->parent, udev->portnum,
 			     (struct usb_dev_state *) udev);
@@ -393,6 +400,9 @@
 
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
+
+call_put_busid_priv:
+	put_busid_priv(busid_priv);
 	return rc;
 }
 
@@ -418,7 +428,7 @@
 	struct bus_id_priv *busid_priv;
 	int rc;
 
-	dev_dbg(&udev->dev, "Enter\n");
+	dev_dbg(&udev->dev, "Enter disconnect\n");
 
 	busid_priv = get_busid_priv(udev_busid);
 	if (!busid_priv) {
@@ -431,7 +441,7 @@
 	/* get stub_device */
 	if (!sdev) {
 		dev_err(&udev->dev, "could not get device");
-		return;
+		goto call_put_busid_priv;
 	}
 
 	dev_set_drvdata(&udev->dev, NULL);
@@ -446,12 +456,12 @@
 				  (struct usb_dev_state *) udev);
 	if (rc) {
 		dev_dbg(&udev->dev, "unable to release port\n");
-		return;
+		goto call_put_busid_priv;
 	}
 
 	/* If usb reset is called from event handler */
 	if (usbip_in_eh(current))
-		return;
+		goto call_put_busid_priv;
 
 	/* shutdown the current connection */
 	shutdown_busid(busid_priv);
@@ -462,12 +472,11 @@
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
 
-	if (busid_priv->status == STUB_BUSID_ALLOC) {
+	if (busid_priv->status == STUB_BUSID_ALLOC)
 		busid_priv->status = STUB_BUSID_ADDED;
-	} else {
-		busid_priv->status = STUB_BUSID_OTHER;
-		del_match_busid((char *)udev_busid);
-	}
+
+call_put_busid_priv:
+	put_busid_priv(busid_priv);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c0..fa90496 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -28,6 +28,7 @@
 #define DRIVER_DESC "USB/IP Host Driver"
 
 struct kmem_cache *stub_priv_cache;
+
 /*
  * busid_tables defines matching busids that usbip can grab. A user can change
  * dynamically what device is locally used and what device is exported to a
@@ -39,6 +40,8 @@
 
 static void init_busid_table(void)
 {
+	int i;
+
 	/*
 	 * This also sets the bus_table[i].status to
 	 * STUB_BUSID_OTHER, which is 0.
@@ -46,6 +49,9 @@
 	memset(busid_table, 0, sizeof(busid_table));
 
 	spin_lock_init(&busid_table_lock);
+
+	for (i = 0; i < MAX_BUSID; i++)
+		spin_lock_init(&busid_table[i].busid_lock);
 }
 
 /*
@@ -57,15 +63,20 @@
 	int i;
 	int idx = -1;
 
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (busid_table[i].name[0])
 			if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
 				idx = i;
+				spin_unlock(&busid_table[i].busid_lock);
 				break;
 			}
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 	return idx;
 }
 
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
 struct bus_id_priv *get_busid_priv(const char *busid)
 {
 	int idx;
@@ -73,13 +84,22 @@
 
 	spin_lock(&busid_table_lock);
 	idx = get_busid_idx(busid);
-	if (idx >= 0)
+	if (idx >= 0) {
 		bid = &(busid_table[idx]);
+		/* get busid_lock before returning */
+		spin_lock(&bid->busid_lock);
+	}
 	spin_unlock(&busid_table_lock);
 
 	return bid;
 }
 
+void put_busid_priv(struct bus_id_priv *bid)
+{
+	if (bid)
+		spin_unlock(&bid->busid_lock);
+}
+
 static int add_match_busid(char *busid)
 {
 	int i;
@@ -92,15 +112,19 @@
 		goto out;
 	}
 
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (!busid_table[i].name[0]) {
 			strlcpy(busid_table[i].name, busid, BUSID_SIZE);
 			if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
 			    (busid_table[i].status != STUB_BUSID_REMOV))
 				busid_table[i].status = STUB_BUSID_ADDED;
 			ret = 0;
+			spin_unlock(&busid_table[i].busid_lock);
 			break;
 		}
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 
 out:
 	spin_unlock(&busid_table_lock);
@@ -121,6 +145,8 @@
 	/* found */
 	ret = 0;
 
+	spin_lock(&busid_table[idx].busid_lock);
+
 	if (busid_table[idx].status == STUB_BUSID_OTHER)
 		memset(busid_table[idx].name, 0, BUSID_SIZE);
 
@@ -128,6 +154,7 @@
 	    (busid_table[idx].status != STUB_BUSID_ADDED))
 		busid_table[idx].status = STUB_BUSID_REMOV;
 
+	spin_unlock(&busid_table[idx].busid_lock);
 out:
 	spin_unlock(&busid_table_lock);
 
@@ -140,9 +167,12 @@
 	char *out = buf;
 
 	spin_lock(&busid_table_lock);
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (busid_table[i].name[0])
 			out += sprintf(out, "%s ", busid_table[i].name);
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 	spin_unlock(&busid_table_lock);
 	out += sprintf(out, "\n");
 
@@ -184,6 +214,51 @@
 static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
 		   store_match_busid);
 
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+	int ret;
+
+	/* device_attach() callers should hold parent lock for USB */
+	if (busid_priv->udev->dev.parent)
+		device_lock(busid_priv->udev->dev.parent);
+	ret = device_attach(&busid_priv->udev->dev);
+	if (busid_priv->udev->dev.parent)
+		device_unlock(busid_priv->udev->dev.parent);
+	if (ret < 0) {
+		dev_err(&busid_priv->udev->dev, "rebind failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+	struct bus_id_priv *busid_priv;
+	int i;
+
+	/* update status to STUB_BUSID_OTHER so probe ignores the device */
+	spin_lock(&busid_table_lock);
+	for (i = 0; i < MAX_BUSID; i++) {
+		if (busid_table[i].name[0] &&
+		    busid_table[i].shutdown_busid) {
+			busid_priv = &(busid_table[i]);
+			busid_priv->status = STUB_BUSID_OTHER;
+		}
+	}
+	spin_unlock(&busid_table_lock);
+
+	/* now run rebind - no need to hold locks. driver files are removed */
+	for (i = 0; i < MAX_BUSID; i++) {
+		if (busid_table[i].name[0] &&
+		    busid_table[i].shutdown_busid) {
+			busid_priv = &(busid_table[i]);
+			do_rebind(busid_table[i].name, busid_priv);
+		}
+	}
+#endif
+}
+
 static ssize_t rebind_store(struct device_driver *dev, const char *buf,
 				 size_t count)
 {
@@ -201,11 +276,17 @@
 	if (!bid)
 		return -ENODEV;
 
-	ret = device_attach(&bid->udev->dev);
-	if (ret < 0) {
-		dev_err(&bid->udev->dev, "rebind failed\n");
+	/* mark the device for deletion so probe ignores it during rescan */
+	bid->status = STUB_BUSID_OTHER;
+	/* release the busid lock */
+	put_busid_priv(bid);
+
+	ret = do_rebind((char *) buf, bid);
+	if (ret < 0)
 		return ret;
-	}
+
+	/* delete device from busid_table */
+	del_match_busid((char *) buf);
 
 	return count;
 }
@@ -328,6 +409,9 @@
 	 */
 	usb_deregister_device_driver(&stub_driver);
 
+	/* initiate scan to attach devices */
+	stub_device_rebind();
+
 	kmem_cache_destroy(stub_priv_cache);
 }
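
The locking contract introduced above is that get_busid_priv() returns with the matching entry's busid_lock held, and every caller, including the error paths reworked in stub_dev.c, must release it with put_busid_priv(). A minimal caller sketch under that contract (the function name and body are illustrative only):

static int example_busid_user(const char *busid)
{
	struct bus_id_priv *bid;
	int rc = 0;

	/* returns with bid->busid_lock held on success */
	bid = get_busid_priv(busid);
	if (!bid)
		return -ENODEV;

	if (bid->status == STUB_BUSID_OTHER) {
		rc = -ENODEV;
		goto out;	/* still need to drop the lock */
	}

	/* ... use the entry while it cannot change underneath us ... */

out:
	put_busid_priv(bid);	/* releases bid->busid_lock */
	return rc;
}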
 
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f0b955f..109e65b 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -258,7 +258,7 @@
 #define	VUDC_EVENT_ERROR_USB	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
 #define	VUDC_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
 
-#define	VDEV_EVENT_REMOVED	(USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define	VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
 #define	VDEV_EVENT_DOWN		(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define	VDEV_EVENT_ERROR_TCP	(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define	VDEV_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index f163566..f8f7f38 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -105,10 +105,6 @@
 			unset_event(ud, USBIP_EH_UNUSABLE);
 		}
 
-		/* Stop the error handler. */
-		if (ud->event & USBIP_EH_BYE)
-			usbip_dbg_eh("removed %p\n", ud);
-
 		wake_up(&ud->eh_waitq);
 	}
 }
diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig
index e8f902b..a5e0662 100644
--- a/drivers/video/fbdev/msm/Kconfig
+++ b/drivers/video/fbdev/msm/Kconfig
@@ -82,6 +82,16 @@
 	MHL (Mobile High-Definition Link) technology
 	uses USB connector to output HDMI content
 
+config FB_MSM_MDSS_SPI_PANEL
+	depends on FB_MSM_MDSS
+	bool "Support SPI panel feature"
+	default n
+	---help---
+	The MDSS SPI panel support transmits MDSS frame buffer data to the
+	connected panel over SPI. Limited by the SPI data rate, the current
+	maximum refresh rate is about 27 fps, and the MDP hardware
+	architecture restricts this feature to MDP3.
+
 config FB_MSM_MDSS_MHL3
 	depends on FB_MSM_MDSS_HDMI_PANEL
 	bool "MHL3 SII8620 Support"
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index 81d4953..26a940c 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -46,6 +46,11 @@
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_panel.o
 
+ifeq ($(CONFIG_SPI_QUP), y)
+obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_client.o
+obj-$(CONFIG_FB_MSM_MDSS_SPI_PANEL) += mdss_spi_panel.o
+endif
+
 ifneq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_util.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_edid.o
diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c
index 88bf0aa..8a32902 100644
--- a/drivers/video/fbdev/msm/dsi_status_6g.c
+++ b/drivers/video/fbdev/msm/dsi_status_6g.c
@@ -18,6 +18,7 @@
 
 #include "mdss_dsi.h"
 #include "mdss_mdp.h"
+#include "mdss_debug.h"
 
 /*
  * mdss_check_te_status() - Check the status of panel for TE based ESD.
@@ -155,6 +156,7 @@
 		ctl->ops.wait_pingpong(ctl, NULL);
 
 	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
+	MDSS_XLOG(mipi->mode);
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
 	ret = ctrl_pdata->check_status(ctrl_pdata);
diff --git a/drivers/video/fbdev/msm/dsi_status_v2.c b/drivers/video/fbdev/msm/dsi_status_v2.c
index 35b0984..87720cf 100644
--- a/drivers/video/fbdev/msm/dsi_status_v2.c
+++ b/drivers/video/fbdev/msm/dsi_status_v2.c
@@ -18,6 +18,7 @@
 
 #include "mdss_dsi.h"
 #include "mdp3_ctrl.h"
+#include "mdss_spi_panel.h"
 
 /*
  * mdp3_check_te_status() - Check the status of panel for TE based ESD.
@@ -165,3 +166,79 @@
 	mdss_fb_report_panel_dead(pdsi_status->mfd);
 }
 
+#if defined(CONFIG_FB_MSM_MDSS_SPI_PANEL)
+void mdp3_check_spi_panel_status(struct work_struct *work, uint32_t interval)
+{
+	struct dsi_status_data *pdsi_status = NULL;
+	struct mdss_panel_data *pdata = NULL;
+	struct spi_panel_data *ctrl_pdata = NULL;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret = 0;
+
+	pdsi_status = container_of(to_delayed_work(work),
+			struct dsi_status_data, check_status);
+
+	if (!pdsi_status || !(pdsi_status->mfd)) {
+		pr_err("%s: mfd not available\n", __func__);
+		return;
+	}
+
+	pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("%s: panel data not available\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data, panel_data);
+	if (!ctrl_pdata || !ctrl_pdata->check_status) {
+		pr_err("%s: Dsi ctrl or status_check callback not available\n",
+				__func__);
+		return;
+	}
+
+	mdp3_session = pdsi_status->mfd->mdp.private1;
+	if (!mdp3_session) {
+		pr_err("%s: Display is off\n", __func__);
+		return;
+	}
+
+	if (mdp3_session->in_splash_screen) {
+		schedule_delayed_work(&pdsi_status->check_status,
+			msecs_to_jiffies(interval));
+		pr_debug("%s: cont splash is on\n", __func__);
+		return;
+	}
+
+	mutex_lock(&mdp3_session->lock);
+	if (!mdp3_session->status) {
+		pr_debug("%s: display off already\n", __func__);
+		mutex_unlock(&mdp3_session->lock);
+		return;
+	}
+
+	if (!ret)
+		ret = ctrl_pdata->check_status(ctrl_pdata);
+	else
+		pr_err("%s:wait_for_dma_done error\n", __func__);
+	mutex_unlock(&mdp3_session->lock);
+
+	if (mdss_fb_is_power_on_interactive(pdsi_status->mfd)) {
+		if (ret > 0) {
+			schedule_delayed_work(&pdsi_status->check_status,
+				msecs_to_jiffies(interval));
+		} else {
+			char *envp[2] = {"PANEL_ALIVE=0", NULL};
+
+			pdata->panel_info.panel_dead = true;
+			ret = kobject_uevent_env(
+			&pdsi_status->mfd->fbi->dev->kobj, KOBJ_CHANGE, envp);
+			pr_err("%s:panel has gone bad, sending uevent - %s\n",
+			 __func__, envp[0]);
+		}
+	}
+}
+#else
+void mdp3_check_spi_panel_status(struct work_struct *work, uint32_t interval)
+{
+}
+#endif
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
index c9db88e..5e9e49c 100644
--- a/drivers/video/fbdev/msm/mdp3.c
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -56,6 +56,7 @@
 #include "mdss_debug.h"
 #include "mdss_smmu.h"
 #include "mdss.h"
+#include "mdss_spi_panel.h"
 
 #ifndef EXPORT_COMPAT
 #define EXPORT_COMPAT(x)
@@ -109,6 +110,7 @@
 
 static struct mdss_panel_intf pan_types[] = {
 	{"dsi", MDSS_PANEL_INTF_DSI},
+	{"spi", MDSS_PANEL_INTF_SPI},
 };
 static char mdss_mdp3_panel[MDSS_MAX_PANEL_LEN];
 
@@ -127,6 +129,13 @@
 	},
 };
 
+#ifndef CONFIG_FB_MSM_MDSS_SPI_PANEL
+void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata, u32 bl_level)
+{
+
+}
+#endif
+
 static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
 {
 	int i = 0;
@@ -1350,7 +1359,11 @@
 						client == MDP3_CLIENT_DMA_P)
 				mdss_smmu_unmap_dma_buf(data->tab_clone,
 					dom, dir, data->srcp_dma_buf);
-			else
+			else if (client == MDP3_CLIENT_SPI) {
+				ion_unmap_kernel(iclient, data->srcp_ihdl);
+				ion_free(iclient, data->srcp_ihdl);
+				data->srcp_ihdl = NULL;
+			} else
 				mdss_smmu_unmap_dma_buf(data->srcp_table,
 					dom, dir, data->srcp_dma_buf);
 			data->mapped = false;
@@ -1416,7 +1429,15 @@
 				data->srcp_dma_buf = NULL;
 				return ret;
 			}
-
+			if (client == MDP3_CLIENT_SPI) {
+				data->srcp_ihdl = ion_import_dma_buf(iclient,
+					data->srcp_dma_buf);
+				if (IS_ERR_OR_NULL(data->srcp_ihdl)) {
+					pr_err("error on ion_import_fd\n");
+					data->srcp_ihdl = NULL;
+					return -EIO;
+				}
+			}
 			data->srcp_attachment =
 			mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
 					&mdp3_res->pdev->dev, dom);
@@ -1449,6 +1470,25 @@
 					data->tab_clone, dom,
 					&data->addr, &data->len,
 					DMA_BIDIRECTIONAL);
+			} else if (client == MDP3_CLIENT_SPI) {
+				void *vaddr;
+
+				if (ion_handle_get_size(iclient,
+						data->srcp_ihdl,
+						(size_t *)&data->len) < 0) {
+					pr_err("get size failed\n");
+					return -EINVAL;
+				}
+				vaddr = ion_map_kernel(iclient,
+						data->srcp_ihdl);
+				if (IS_ERR_OR_NULL(vaddr)) {
+					pr_err("Mapping failed\n");
+					mdp3_put_img(data, client);
+					return -EINVAL;
+				}
+				data->addr = (dma_addr_t) vaddr;
+				data->len -= img->offset;
+				return 0;
 			} else {
 				ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
 					data->srcp_table, dom, &data->addr,
@@ -1741,6 +1781,8 @@
 	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
 		status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
 		rc = status & 0x1;
+	} else if (pdata->panel_info.type == SPI_PANEL) {
+		rc = is_spi_panel_continuous_splash_on(pdata);
 	} else {
 		status = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
 		status &= 0x180000;
@@ -1777,7 +1819,11 @@
 	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
 			MDP3_CLIENT_DMA_P);
 
-	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+	/* DMA is not used on the SPI interface, so skip DMA bus voting */
+	if (panel_info->type == SPI_PANEL)
+		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
+	else
+		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
 	bus_handle->restore_ab[MDP3_CLIENT_DMA_P] = ab;
 	bus_handle->restore_ib[MDP3_CLIENT_DMA_P] = ib;
 
@@ -1795,6 +1841,8 @@
 
 	if (panel_info->type == MIPI_VIDEO_PANEL)
 		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1;
+	else if (panel_info->type == SPI_PANEL)
+		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_SPI_CMD].active = 1;
 	else
 		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1;
 
@@ -2485,6 +2533,9 @@
 	mdp3_res->mdss_util->mdp_probe_done = true;
 	pr_debug("%s: END\n", __func__);
 
+	if (mdp3_res->pan_cfg.pan_intf == MDSS_PANEL_INTF_SPI)
+		mdp3_interface.check_dsi_status = mdp3_check_spi_panel_status;
+
 probe_done:
 	if (IS_ERR_VALUE(rc))
 		kfree(mdp3_res->mdp3_hw.irq_info);
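
The MDP3_CLIENT_SPI paths added above pair an ION import plus kernel mapping in the get-image path with an unmap and free in the put-image path, instead of the SMMU mapping used for the DMA clients. A condensed sketch of that lifecycle, using the same legacy ION calls as the diff, with error unwinding trimmed (the helper names are illustrative):

/* assumes the in-tree <linux/msm_ion.h> API and struct mdp3_img_data */
static int spi_img_map(struct ion_client *iclient, struct mdp3_img_data *data)
{
	void *vaddr;

	data->srcp_ihdl = ion_import_dma_buf(iclient, data->srcp_dma_buf);
	if (IS_ERR_OR_NULL(data->srcp_ihdl))
		return -EIO;

	if (ion_handle_get_size(iclient, data->srcp_ihdl,
				(size_t *)&data->len) < 0)
		return -EINVAL;

	vaddr = ion_map_kernel(iclient, data->srcp_ihdl);
	if (IS_ERR_OR_NULL(vaddr))
		return -EINVAL;

	data->addr = (dma_addr_t)vaddr;	/* CPU-visible, no IOMMU mapping */
	return 0;
}

static void spi_img_unmap(struct ion_client *iclient, struct mdp3_img_data *data)
{
	ion_unmap_kernel(iclient, data->srcp_ihdl);
	ion_free(iclient, data->srcp_ihdl);
	data->srcp_ihdl = NULL;
}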
diff --git a/drivers/video/fbdev/msm/mdp3.h b/drivers/video/fbdev/msm/mdp3.h
index 7132dee..6b56052 100644
--- a/drivers/video/fbdev/msm/mdp3.h
+++ b/drivers/video/fbdev/msm/mdp3.h
@@ -84,6 +84,7 @@
 	MDP3_CLIENT_DSI = 1,
 	MDP3_CLIENT_PPP,
 	MDP3_CLIENT_IOMMU,
+	MDP3_CLIENT_SPI,
 	MDP3_CLIENT_MAX,
 };
 
@@ -208,6 +209,8 @@
 	bool solid_fill_vote_en;
 	struct list_head reg_bus_clist;
 	struct mutex reg_bus_lock;
+	int bklt_level;
+	int bklt_update;
 	bool twm_en;
 	u32 max_bw;
 
@@ -267,6 +270,7 @@
 void mdp3_enable_regulator(int enable);
 void mdp3_check_dsi_ctrl_status(struct work_struct *work,
 				uint32_t interval);
+void mdp3_check_spi_panel_status(struct work_struct *work, uint32_t interval);
 int mdp3_dynamic_clock_gating_ctrl(int enable);
 int mdp3_footswitch_ctrl(int enable);
 int mdp3_qos_remapper_setup(struct mdss_panel_data *panel);
@@ -279,6 +283,8 @@
 void mdp3_clear_irq(u32 interrupt_mask);
 int mdp3_enable_panic_ctrl(void);
 
+void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata, u32 bl_level);
+
 int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd,
 	struct file *file, struct mdp_layer_commit_v1 *commit);
 int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd,
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index 2d13210..c976c0e 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -23,11 +23,13 @@
 #include <linux/dma-buf.h>
 #include <linux/pm_runtime.h>
 #include <linux/iommu.h>
+#include <linux/msm_ion.h>
 
 #include "mdp3_ctrl.h"
 #include "mdp3.h"
 #include "mdp3_ppp.h"
 #include "mdss_smmu.h"
+#include "mdss_spi_panel.h"
 #include "mdss_sync.h"
 
 #define VSYNC_EXPIRE_TICK	4
@@ -72,7 +74,7 @@
 	bufq->pop_idx = 0;
 }
 
-void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq)
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq, int client)
 {
 	int count = bufq->count;
 
@@ -83,7 +85,7 @@
 		struct mdp3_img_data *data = &bufq->img_data[bufq->pop_idx];
 
 		bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
-		mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+		mdp3_put_img(data, client);
 	}
 	bufq->count = 0;
 	bufq->push_idx = 0;
@@ -122,6 +124,18 @@
 	return bufq->count;
 }
 
+int mdp3_get_ion_client(struct msm_fb_data_type *mfd)
+{
+	int intf_type;
+
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
+
+	if (intf_type == MDP3_DMA_OUTPUT_SEL_SPI_CMD)
+		return MDP3_CLIENT_SPI;
+	else
+		return MDP3_CLIENT_DMA_P;
+}
+
 void mdp3_ctrl_notifier_register(struct mdp3_session_data *ses,
 	struct notifier_block *notifier)
 {
@@ -174,6 +188,7 @@
 		return;
 
 	mutex_lock(&session->lock);
+	MDSS_XLOG(0x111);
 	if (session->vsync_enabled ||
 		atomic_read(&session->vsync_countdown) > 0) {
 		mutex_unlock(&session->lock);
@@ -182,10 +197,17 @@
 		return;
 	}
 
+	if (!session->clk_on) {
+		mutex_unlock(&session->lock);
+		pr_debug("%s: Clk shut down is done\n", __func__);
+		MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+		return;
+	}
 	if (session->intf->active) {
 retry_dma_done:
 		rc = wait_for_completion_timeout(&session->dma_completion,
 							WAIT_DMA_TIMEOUT);
+		MDSS_XLOG(0x222);
 		if (rc <= 0) {
 			struct mdss_panel_data *panel;
 
@@ -196,6 +218,7 @@
 				if (--retry_count) {
 					pr_err("dmap is busy, retry %d\n",
 						retry_count);
+					MDSS_XLOG(__LINE__, retry_count);
 					goto retry_dma_done;
 				}
 				pr_err("dmap is still busy, bug_on\n");
@@ -299,8 +322,11 @@
 	struct mdp3_notification vsync_client;
 	struct mdp3_notification *arg = NULL;
 	bool mod_vsync_timer = false;
+	int intf_type;
 
 	pr_debug("mdp3_ctrl_vsync_enable =%d\n", enable);
+
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
 	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
 	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
 		!mdp3_session->intf)
@@ -338,18 +364,25 @@
 		}
 	}
 
-	mdp3_clk_enable(1, 0);
-	mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
-	mdp3_clk_enable(0, 0);
+	if (intf_type == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
+		mdp3_spi_vsync_enable(mdp3_session->panel, arg);
+	} else {
+		mdp3_clk_enable(1, 0);
+		mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
+		mdp3_clk_enable(0, 0);
+	}
 
 	/*
 	 * Need to fake vsync whenever dsi interface is not
 	 * active or when dsi clocks are currently off
 	 */
-	if (mod_vsync_timer) {
+	if (mod_vsync_timer && (intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)) {
 		mod_timer(&mdp3_session->vsync_timer,
 			jiffies + msecs_to_jiffies(mdp3_session->vsync_period));
-	} else if (!enable) {
+	} else if (enable && !mdp3_session->clk_on) {
+		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+		mdp3_ctrl_clk_enable(mfd, 1);
+	} else if (!enable && (intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)) {
 		del_timer(&mdp3_session->vsync_timer);
 	}
 
@@ -380,7 +413,7 @@
 		return -EFAULT;
 	p_req = p + sizeof(req_list_header);
 	count = req_list_header.count;
-	if (count < 0 || count >= MAX_BLIT_REQ)
+	if (count < 0 || count > MAX_BLIT_REQ)
 		return -EINVAL;
 	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 1);
 	if (!rc)
@@ -399,7 +432,7 @@
 		return -EFAULT;
 	p_req = p + sizeof(struct mdp_blit_req_list);
 	count = req_list_header.count;
-	if (count < 0 || count >= MAX_BLIT_REQ)
+	if (count < 0 || count > MAX_BLIT_REQ)
 		return -EINVAL;
 	req_list_header.sync.acq_fen_fd_cnt = 0;
 	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 0);
@@ -588,14 +621,32 @@
 static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
 {
 	int rc = 0;
+	u32 vtotal = 0;
+	int frame_rate = DEFAULT_FRAME_RATE;
 
 	if (status) {
+		struct mdss_panel_info *panel_info = mfd->panel_info;
 		u64 ab = 0;
 		u64 ib = 0;
 
+		frame_rate = mdss_panel_get_framerate(mfd->panel_info,
+			FPS_RESOLUTION_HZ);
 		mdp3_calc_dma_res(mfd->panel_info, NULL, &ab, &ib,
 			ppp_bpp(mfd->fb_imgType));
-		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+		vtotal = panel_info->yres + panel_info->lcdc.v_back_porch +
+			panel_info->lcdc.v_front_porch +
+			panel_info->lcdc.v_pulse_width;
+		ab = panel_info->xres * vtotal * ppp_bpp(mfd->fb_imgType);
+		ab *= frame_rate;
+		ib = ab;
+
+		/* DMA is not used on the SPI interface, so skip DMA bus voting */
+		if (mdp3_ctrl_get_intf_type(mfd) ==
+			 MDP3_DMA_OUTPUT_SEL_SPI_CMD)
+			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
+		else
+			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P,
+				ab, ib);
 	} else {
 		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
 	}
@@ -644,6 +695,9 @@
 	case LCDC_PANEL:
 		type = MDP3_DMA_OUTPUT_SEL_LCDC;
 		break;
+	case SPI_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_SPI_CMD;
+		break;
 	default:
 		type = MDP3_DMA_OUTPUT_SEL_MAX;
 	}
@@ -661,8 +715,11 @@
 	case MDP_RGB_888:
 		format = MDP3_DMA_IBUF_FORMAT_RGB888;
 		break;
+	case MDP_XRGB_8888:
 	case MDP_ARGB_8888:
 	case MDP_RGBA_8888:
+	case MDP_BGRA_8888:
+	case MDP_RGBX_8888:
 		format = MDP3_DMA_IBUF_FORMAT_XRGB8888;
 		break;
 	default:
@@ -705,7 +762,8 @@
 
 	cfg.type = mdp3_ctrl_get_intf_type(mfd);
 	if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
-		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC ||
+		cfg.type == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 		video->hsync_period = hsync_period;
 		video->hsync_pulse_width = h_pulse_width;
 		video->vsync_period = vsync_period;
@@ -753,7 +811,7 @@
 	struct fb_var_screeninfo *var;
 	struct mdp3_dma_output_config outputConfig;
 	struct mdp3_dma_source sourceConfig;
-	int frame_rate = mfd->panel_info->mipi.frame_rate;
+	int frame_rate = DEFAULT_FRAME_RATE;
 	int vbp, vfp, vspw;
 	int vtotal, vporch;
 	struct mdp3_notification dma_done_callback;
@@ -762,6 +820,7 @@
 
 	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
 
+	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
 	vbp = panel_info->lcdc.v_back_porch;
 	vfp = panel_info->lcdc.v_front_porch;
 	vspw = panel_info->lcdc.v_pulse_width;
@@ -802,7 +861,7 @@
 		sourceConfig.stride = fix->line_length;
 	}
 
-	te.frame_rate = panel_info->mipi.frame_rate;
+	te.frame_rate = frame_rate;
 	te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
 	te.tear_check_en = panel_info->te.tear_check_en;
 	te.sync_cfg_height = panel_info->te.sync_cfg_height;
@@ -991,6 +1050,7 @@
 static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
 {
 	int rc = 0;
+	int client = 0;
 	bool intf_stopped = true;
 	struct mdp3_session_data *mdp3_session;
 	struct mdss_panel_data *panel;
@@ -1014,8 +1074,10 @@
 	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
 		mfd->panel_power_state);
 	panel = mdp3_session->panel;
-	mutex_lock(&mdp3_session->lock);
 
+	cancel_work_sync(&mdp3_session->clk_off_work);
+	mutex_lock(&mdp3_session->lock);
+	MDSS_XLOG(0x111);
 	pr_debug("Requested power state = %d\n", mfd->panel_power_state);
 	if (mdss_fb_is_power_on_lp(mfd)) {
 		/*
@@ -1142,10 +1204,11 @@
 				pr_err("%s: pm_runtime_put failed (rc %d)\n",
 					__func__, rc);
 		}
-		mdp3_bufq_deinit(&mdp3_session->bufq_out);
+		client = mdp3_get_ion_client(mfd);
+		mdp3_bufq_deinit(&mdp3_session->bufq_out, client);
 		if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
 			mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
-			mdp3_bufq_deinit(&mdp3_session->bufq_in);
+			mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 		}
 	}
 
@@ -1175,6 +1238,10 @@
 		mdp3_ctrl_clk_enable(mdp3_session->mfd, 0);
 	}
 off_error:
+	if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
+		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+		mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
+	}
 	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
 	mutex_unlock(&mdp3_session->lock);
 	/* Release the last reference to the runtime device */
@@ -1322,14 +1389,16 @@
 	struct fb_info *fbi = mfd->fbi;
 	struct fb_fix_screeninfo *fix;
 	int format;
+	int client;
 
 	fix = &fbi->fix;
 	format = mdp3_ctrl_get_source_format(mfd->fb_imgType);
 	mutex_lock(&mdp3_session->lock);
 
+	client = mdp3_get_ion_client(mfd);
 	if (mdp3_session->overlay.id == ndx && ndx == 1) {
 		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
-		mdp3_bufq_deinit(&mdp3_session->bufq_in);
+		mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 	} else {
 		rc = -EINVAL;
 	}
@@ -1348,18 +1417,20 @@
 	struct msmfb_data *img = &req->data;
 	struct mdp3_img_data data;
 	struct mdp3_dma *dma = mdp3_session->dma;
+	int client;
 
+	client = mdp3_get_ion_client(mfd);
 	memset(&data, 0, sizeof(struct mdp3_img_data));
-	if (mfd->panel.type == MIPI_CMD_PANEL)
+	if (mfd->panel.type == MIPI_CMD_PANEL || client == MDP3_CLIENT_SPI)
 		is_panel_type_cmd = true;
 	if (is_panel_type_cmd) {
-		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		rc = mdp3_iommu_enable(client);
 		if (rc) {
 			pr_err("fail to enable iommu\n");
 			return rc;
 		}
 	}
-	rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P);
+	rc = mdp3_get_img(img, &data, client);
 	if (rc) {
 		pr_err("fail to get overlay buffer\n");
 		goto err;
@@ -1369,14 +1440,14 @@
 		pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
 			data.len, (dma->source_config.stride *
 			dma->source_config.height));
-		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		mdp3_put_img(&data, client);
 		rc = -EINVAL;
 		goto err;
 	}
 	rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
 	if (rc) {
 		pr_err("fail to queue the overlay buffer, buffer drop\n");
-		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		mdp3_put_img(&data, client);
 		goto err;
 	}
 	rc = 0;
@@ -1435,8 +1506,11 @@
 	struct mdp3_img_data *data;
 	struct mdss_panel_info *panel_info;
 	int rc = 0;
+	int client;
 	static bool splash_done;
 	struct mdss_panel_data *panel;
+	int frame_rate = DEFAULT_FRAME_RATE;
+	int stride;
 
 	if (!mfd || !mfd->mdp.private1)
 		return -EINVAL;
@@ -1446,6 +1520,9 @@
 	if (!mdp3_session || !mdp3_session->dma)
 		return -EINVAL;
 
+	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
+	client = mdp3_get_ion_client(mfd);
+
 	if (mdp3_bufq_count(&mdp3_session->bufq_in) == 0) {
 		pr_debug("no buffer in queue yet\n");
 		return -EPERM;
@@ -1482,6 +1559,7 @@
 	}
 	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 
+	cancel_work_sync(&mdp3_session->clk_off_work);
 	mutex_lock(&mdp3_session->lock);
 
 	if (!mdp3_session->status) {
@@ -1489,13 +1567,23 @@
 		mutex_unlock(&mdp3_session->lock);
 		return -EPERM;
 	}
-
+	MDSS_XLOG(0x111);
 	mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
 	data = mdp3_bufq_pop(&mdp3_session->bufq_in);
 	if (data) {
 		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
 		mdp3_ctrl_clk_enable(mfd, 1);
-		if (mdp3_session->dma->update_src_cfg &&
+		stride = mdp3_session->dma->source_config.stride;
+		if (mdp3_ctrl_get_intf_type(mfd) ==
+			MDP3_DMA_OUTPUT_SEL_SPI_CMD){
+			mdp3_session->intf->active = false;
+			msm_ion_do_cache_op(mdp3_res->ion_client,
+				data->srcp_ihdl, (void *)(int)data->addr,
+					data->len, ION_IOC_INV_CACHES);
+			rc = mdss_spi_panel_kickoff(mdp3_session->panel,
+				(void *)(int)data->addr, (int)data->len,
+					stride);
+		} else if (mdp3_session->dma->update_src_cfg &&
 				panel_info->partial_update_enabled) {
 			panel->panel_info.roi.x = mdp3_session->dma->roi.x;
 			panel->panel_info.roi.y = mdp3_session->dma->roi.y;
@@ -1515,13 +1603,15 @@
 				MDP_NOTIFY_FRAME_TIMEOUT);
 		} else {
 			if (mdp3_ctrl_get_intf_type(mfd) ==
-						MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+				MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+				mdp3_ctrl_get_intf_type(mfd) ==
+					MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 				mdp3_ctrl_notify(mdp3_session,
 					MDP_NOTIFY_FRAME_DONE);
 			}
 		}
 		mdp3_session->dma_active = 1;
-		init_completion(&mdp3_session->dma_completion);
+		reinit_completion(&mdp3_session->dma_completion);
 		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
 		mdp3_bufq_push(&mdp3_session->bufq_out, data);
 	}
@@ -1530,16 +1620,16 @@
 		mdp3_release_splash_memory(mfd);
 		data = mdp3_bufq_pop(&mdp3_session->bufq_out);
 		if (data)
-			mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+			mdp3_put_img(data, client);
 	}
 
 	if (mdp3_session->first_commit) {
 		/*wait to ensure frame is sent to panel*/
 		if (panel_info->mipi.post_init_delay)
-			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+			msleep(((1000 / frame_rate) + 1) *
 					panel_info->mipi.post_init_delay);
 		else
-			msleep(1000 / panel_info->mipi.frame_rate);
+			msleep((1000 / frame_rate) + 1);
 		mdp3_session->first_commit = false;
 		if (panel)
 			rc |= panel->event_handler(panel,
@@ -1554,6 +1644,12 @@
 		mdp3_session->esd_recovery = false;
 	}
 
+	/* Update backlight only if it has changed */
+	if (mdp3_res->bklt_level && mdp3_res->bklt_update) {
+		mdss_spi_panel_bl_ctrl_update(panel, mdp3_res->bklt_level);
+		mdp3_res->bklt_update = false;
+	}
+
 	/* start vsync tick countdown for cmd mode if vsync isn't enabled */
 	if (mfd->panel.type == MIPI_CMD_PANEL && !mdp3_session->vsync_enabled)
 		mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
@@ -1670,7 +1766,7 @@
 			}
 		}
 		mdp3_session->dma_active = 1;
-		init_completion(&mdp3_session->dma_completion);
+		reinit_completion(&mdp3_session->dma_completion);
 		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
 	} else {
 		pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
@@ -1680,17 +1776,18 @@
 	}
 
 	panel = mdp3_session->panel;
-	if (mdp3_session->first_commit) {
-		/*wait to ensure frame is sent to panel*/
-		if (panel_info->mipi.post_init_delay)
-			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
-					panel_info->mipi.post_init_delay);
-		else
-			msleep(1000 / panel_info->mipi.frame_rate);
-		mdp3_session->first_commit = false;
-		if (panel)
-			panel->event_handler(panel, MDSS_EVENT_POST_PANEL_ON,
-					NULL);
+	if (mdp3_ctrl_get_intf_type(mfd) != MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
+		if (mdp3_session->first_commit) {
+			if (panel_info->mipi.init_delay)
+				msleep(((1000 / panel_info->mipi.frame_rate)
+				+ 1) * panel_info->mipi.post_init_delay);
+			else
+				msleep(1000 / panel_info->mipi.frame_rate);
+			mdp3_session->first_commit = false;
+			if (panel)
+				panel->event_handler(panel,
+					MDSS_EVENT_POST_PANEL_ON, NULL);
+		}
 	}
 
 	mdp3_session->vsync_before_commit = 0;
@@ -1701,6 +1798,11 @@
 		mdp3_session->esd_recovery = false;
 	}
 
+	/* Update backlight only if it has changed */
+	if (mdp3_res->bklt_level && mdp3_res->bklt_update) {
+		mdss_spi_panel_bl_ctrl_update(panel, mdp3_res->bklt_level);
+		mdp3_res->bklt_update = false;
+	}
 
 pan_error:
 	mutex_unlock(&mdp3_session->lock);
@@ -1741,7 +1843,8 @@
 	switch (metadata->op) {
 	case metadata_op_frame_rate:
 		metadata->data.panel_frame_rate =
-			mfd->panel_info->mipi.frame_rate;
+			 mdss_panel_get_framerate(mfd->panel_info,
+				FPS_RESOLUTION_HZ);
 		break;
 	case metadata_op_get_caps:
 		metadata->data.caps.mdp_rev = 305;
@@ -2711,6 +2814,8 @@
 		}
 		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 		rc = mdp3_ctrl_async_blit_req(mfd, argp);
+		if (!rc)
+			cancel_work_sync(&mdp3_session->clk_off_work);
 		break;
 	case MSMFB_BLIT:
 		mutex_lock(&mdp3_res->fs_idle_pc_lock);
@@ -2718,6 +2823,8 @@
 			mdp3_ctrl_reset(mfd);
 		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 		rc = mdp3_ctrl_blit_req(mfd, argp);
+		if (!rc)
+			cancel_work_sync(&mdp3_session->clk_off_work);
 		break;
 	case MSMFB_METADATA_GET:
 		rc = copy_from_user(&metadata, argp, sizeof(metadata));
@@ -2874,6 +2981,7 @@
 	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
 	struct mdp3_session_data *mdp3_session = NULL;
 	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+	int frame_rate = DEFAULT_FRAME_RATE;
 	int rc;
 	int splash_mismatch = 0;
 	struct sched_param sched = { .sched_priority = 16 };
@@ -2883,6 +2991,8 @@
 	if (rc)
 		splash_mismatch = 1;
 
+	frame_rate = mdss_panel_get_framerate(mfd->panel_info,
+		FPS_RESOLUTION_HZ);
 	mdp3_interface->on_fnc = mdp3_ctrl_on;
 	mdp3_interface->off_fnc = mdp3_ctrl_off;
 	mdp3_interface->do_histogram = NULL;
@@ -2961,10 +3071,11 @@
 	init_timer(&mdp3_session->vsync_timer);
 	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
 	mdp3_session->vsync_timer.data = (u32)mdp3_session;
-	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
+	mdp3_session->vsync_period = 1000 / frame_rate;
 	mfd->mdp.private1 = mdp3_session;
 	init_completion(&mdp3_session->dma_completion);
-	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO &&
+		intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)
 		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
 
 	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
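
Several hunks above also switch per-frame paths from init_completion() to reinit_completion(). init_completion() sets up the completion's waitqueue and belongs in one-time setup, while reinit_completion() only resets the done counter so an already-initialised completion can be reused safely. A minimal sketch of that split (the surrounding structure and function names are illustrative):

#include <linux/completion.h>

struct frame_ctx {
	struct completion dma_completion;
};

static void frame_ctx_setup(struct frame_ctx *ctx)
{
	/* one-time: initialises the internal waitqueue and done count */
	init_completion(&ctx->dma_completion);
}

static void frame_kickoff(struct frame_ctx *ctx)
{
	/* per-frame: only clears the done count before the next wait */
	reinit_completion(&ctx->dma_completion);
	/* ... start the update, then wait_for_completion_timeout() ... */
}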
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
index b7b667b..5193af1 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.h
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -84,12 +84,13 @@
 	struct work_struct retire_work;
 };
 
-void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq);
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq, int client);
 int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
 int mdp3_bufq_push(struct mdp3_buffer_queue *bufq,
 			struct mdp3_img_data *data);
 int mdp3_ctrl_get_source_format(u32 imgType);
 int mdp3_ctrl_get_pack_pattern(u32 imgType);
 int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
+int mdp3_get_ion_client(struct msm_fb_data_type *mfd);
 
 #endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index 089d32d..b223c87 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -114,7 +114,8 @@
 	}
 
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
-		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
 			mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME);
 	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
@@ -150,7 +151,8 @@
 	}
 
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
-		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_SPI_CMD) {
 		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
 			mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME);
 	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
@@ -665,6 +667,7 @@
 	ATRACE_BEGIN(__func__);
 	pr_debug("mdp3_dmap_update\n");
 
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
 		cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
 		if (intf->active) {
@@ -757,6 +760,7 @@
 	unsigned long flag;
 	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
 
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
 	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
 		cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
 		if (intf->active)
@@ -965,6 +969,8 @@
 	val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS);
 	pr_err("%s DMAP Status %s\n", __func__,
 		(val & MDP3_DMA_P_BUSY_BIT) ? "BUSY":"IDLE");
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__,
+		(val & MDP3_DMA_P_BUSY_BIT) ? 1:0);
 	return val & MDP3_DMA_P_BUSY_BIT;
 }
 
@@ -1056,7 +1062,7 @@
 	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
 	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
 
-	init_completion(&dma->dma_comp);
+	reinit_completion(&dma->dma_comp);
 	dma->vsync_client.handler = NULL;
 	return ret;
 }
@@ -1264,6 +1270,22 @@
 	return 0;
 }
 
+static int spi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	return 0;
+}
+
+static int spi_cmd_start(struct mdp3_intf *intf)
+{
+	intf->active = true;
+	return 0;
+}
+
+static int spi_cmd_stop(struct mdp3_intf *intf)
+{
+	intf->active = false;
+	return 0;
+}
 int mdp3_intf_init(struct mdp3_intf *intf)
 {
 	switch (intf->cfg.type) {
@@ -1282,6 +1304,11 @@
 		intf->start = dsi_cmd_start;
 		intf->stop = dsi_cmd_stop;
 		break;
+	case MDP3_DMA_OUTPUT_SEL_SPI_CMD:
+		intf->config = spi_cmd_config;
+		intf->start = spi_cmd_start;
+		intf->stop = spi_cmd_stop;
+		break;
 
 	default:
 		return -EINVAL;
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
index 24caedb9..ec327b6 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.h
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -47,6 +47,7 @@
 	MDP3_DMA_OUTPUT_SEL_DSI_CMD,
 	MDP3_DMA_OUTPUT_SEL_LCDC,
 	MDP3_DMA_OUTPUT_SEL_DSI_VIDEO,
+	MDP3_DMA_OUTPUT_SEL_SPI_CMD,
 	MDP3_DMA_OUTPUT_SEL_MAX
 };
 
diff --git a/drivers/video/fbdev/msm/mdp3_layer.c b/drivers/video/fbdev/msm/mdp3_layer.c
index 0078466..98b5c19 100644
--- a/drivers/video/fbdev/msm/mdp3_layer.c
+++ b/drivers/video/fbdev/msm/mdp3_layer.c
@@ -197,6 +197,7 @@
 	struct msmfb_data img;
 	bool is_panel_type_cmd = false;
 	struct mdp3_img_data data;
+	int intf_type;
 	int rc = 0;
 
 	layer = &input_layer[0];
@@ -208,23 +209,24 @@
 		goto err;
 	}
 
+	intf_type = mdp3_get_ion_client(mfd);
 	memset(&img, 0, sizeof(img));
 	img.memory_id = buffer->planes[0].fd;
 	img.offset = buffer->planes[0].offset;
 
 	memset(&data, 0, sizeof(struct mdp3_img_data));
 
-	if (mfd->panel.type == MIPI_CMD_PANEL)
+	if (mfd->panel.type == MIPI_CMD_PANEL || intf_type == MDP3_CLIENT_SPI)
 		is_panel_type_cmd = true;
 	if (is_panel_type_cmd) {
-		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		rc = mdp3_iommu_enable(intf_type);
 		if (rc) {
 			pr_err("fail to enable iommu\n");
 			return rc;
 		}
 	}
 
-	rc = mdp3_get_img(&img, &data, MDP3_CLIENT_DMA_P);
+	rc = mdp3_get_img(&img, &data, intf_type);
 	if (rc) {
 		pr_err("fail to get overlay buffer\n");
 		goto err;
@@ -260,7 +262,7 @@
 	struct mdp3_session_data *mdp3_session;
 	struct mdp3_dma *dma;
 	int layer_count = commit->input_layer_cnt;
-	int stride, format;
+	int stride, format, client;
 
 	/* Handle NULL commit */
 	if (!layer_count) {
@@ -273,7 +275,8 @@
 
 	mutex_lock(&mdp3_session->lock);
 
-	mdp3_bufq_deinit(&mdp3_session->bufq_in);
+	client = mdp3_get_ion_client(mfd);
+	mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 
 	layer_list = commit->input_layers;
 	layer = &layer_list[0];
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
index 6095073..5459510 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -542,6 +542,7 @@
 {
 	struct mdss_panel_info *panel_info = mfd->panel_info;
 	int i, lcount = 0;
+	int frame_rate = DEFAULT_FRAME_RATE;
 	struct mdp_blit_req *req;
 	struct bpp_info bpp;
 	u64 old_solid_fill_pixel = 0;
@@ -556,6 +557,7 @@
 
 	ATRACE_BEGIN(__func__);
 	lcount = lreq->count;
+	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
 	if (lcount == 0) {
 		pr_err("Blit with request count 0, continue to recover!!!\n");
 		ATRACE_END(__func__);
@@ -583,11 +585,11 @@
 		is_blit_optimization_possible(lreq, i);
 		req = &(lreq->req_list[i]);
 
-		if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) {
+		if (req->fps > 0 && req->fps <= frame_rate) {
 			if (fps == 0)
 				fps = req->fps;
 			else
-				fps = panel_info->mipi.frame_rate;
+				fps = frame_rate;
 		}
 
 		mdp3_get_bpp_info(req->src.format, &bpp);
@@ -645,7 +647,7 @@
 	}
 
 	if (fps == 0)
-		fps = panel_info->mipi.frame_rate;
+		fps = frame_rate;
 
 	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
 		honest_ppp_ab = ppp_res.solid_fill_byte * 4;
@@ -1462,6 +1464,18 @@
 			(!(check_if_rgb(bg_req.src.format))) &&
 			(!(hw_woraround_active))) {
 			/*
+			 * Disable SMART blit for BG(YUV) layer when
+			 * Scaling on BG layer
+			 * Rotation on BG layer
+			 * UD flip on BG layer
+			 */
+			if (is_scaling_needed(bg_req) &&
+				(bg_req.flags & MDP_ROT_90) &&
+				(bg_req.flags & MDP_FLIP_UD)) {
+				pr_debug("YUV layer with ROT+UD_FLIP+Scaling Not supported\n");
+				return false;
+			}
+			/*
 			 * swap blit requests at index 0 and 1. YUV layer at
 			 * index 0 is replaced with UI layer request present
 			 * at index 1. Since UI layer will be in  background
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_data.c b/drivers/video/fbdev/msm/mdp3_ppp_data.c
index ac88d9b..dd2cce0 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp_data.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp_data.c
@@ -89,8 +89,8 @@
 };
 
 const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
-	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
-	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
 	[MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
 	[MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
 	[MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index e6d55b5..6fa7906 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -163,6 +163,7 @@
 	MDSS_QUIRK_NEED_SECURE_MAP,
 	MDSS_QUIRK_SRC_SPLIT_ALWAYS,
 	MDSS_QUIRK_HDR_SUPPORT_ENABLED,
+	MDSS_QUIRK_MDP_CLK_SET_RATE,
 	MDSS_QUIRK_MAX,
 };
 
@@ -289,6 +290,7 @@
 	u32 max_mdp_clk_rate;
 	struct mdss_util_intf *mdss_util;
 	struct mdss_panel_data *pdata;
+	unsigned long mdp_clk_rate;
 
 	struct platform_device *pdev;
 	struct mdss_io_data mdss_io;
@@ -414,6 +416,7 @@
 	u32 enable_gate;
 	u32 enable_bw_release;
 	u32 enable_rotator_bw_release;
+	u32 enable_cdp;
 	u32 serialize_wait4pp;
 	u32 wait4autorefresh;
 	u32 lines_before_active;
@@ -466,6 +469,7 @@
 	u32 nmax_concurrent_ad_hw;
 	struct workqueue_struct *ad_calc_wq;
 	u32 ad_debugen;
+	bool mem_retain;
 
 	struct mdss_intr hist_intr;
 
@@ -522,6 +526,7 @@
 
 	u32 splash_intf_sel;
 	u32 splash_split_disp;
+	struct mult_factor bus_throughput_factor;
 };
 
 extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index 9a9f5e4..a81f149 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -129,6 +129,7 @@
 		commit32->commit_v1.input_layer_cnt;
 	commit->commit_v1.left_roi = commit32->commit_v1.left_roi;
 	commit->commit_v1.right_roi = commit32->commit_v1.right_roi;
+	commit->commit_v1.bl_level = commit32->commit_v1.bl_level;
 	memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved,
 		count);
 }
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h
index ebae393..819106b 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.h
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.h
@@ -539,6 +539,7 @@
 	compat_caddr_t		dest_scaler;
 	uint32_t                dest_scaler_cnt;
 	compat_caddr_t		frc_info;
+	uint32_t		bl_level; /* BL level to be updated in commit */
 	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
 };
 
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index 7d07040..86f3bce 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -1109,7 +1109,7 @@
 	struct mdss_data_type *mdata = file->private_data;
 	struct mdss_max_bw_settings *temp_settings;
 	int len = 0, i;
-	char buf[256];
+	char buf[256] = {'\0'};
 
 	if (!mdata)
 		return -ENODEV;
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
index 7bf4f11..ed00fc5 100644
--- a/drivers/video/fbdev/msm/mdss_debug_xlog.c
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -93,6 +93,48 @@
 		(flag == MDSS_XLOG_ALL && mdss_dbg_xlog.xlog_enable);
 }
 
+static void __halt_vbif_xin(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_err("Halting VBIF-XIN\n");
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0, 0xFFFFFFFF, false);
+}
+
+static void __halt_vbif_axi(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_err("Halting VBIF-AXI\n");
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 0xFFFFFFFF, false);
+}
+
+static void __dump_vbif_state(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	unsigned int reg_vbif_src_err, reg_vbif_err_info,
+		reg_vbif_xin_halt_ctrl0, reg_vbif_xin_halt_ctrl1,
+		reg_vbif_axi_halt_ctrl0, reg_vbif_axi_halt_ctrl1;
+
+	reg_vbif_src_err = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_SRC_ERR, false);
+	reg_vbif_err_info = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_ERR_INFO, false);
+	reg_vbif_xin_halt_ctrl0 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_XIN_HALT_CTRL0, false);
+	reg_vbif_xin_halt_ctrl1 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_XIN_HALT_CTRL1, false);
+	reg_vbif_axi_halt_ctrl0 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_AXI_HALT_CTRL0, false);
+	reg_vbif_axi_halt_ctrl1 = MDSS_VBIF_READ(mdata,
+					MMSS_VBIF_AXI_HALT_CTRL1, false);
+	pr_err("VBIF SRC_ERR=%x, ERR_INFO=%x\n",
+				reg_vbif_src_err, reg_vbif_err_info);
+	pr_err("VBIF XIN_HALT_CTRL0=%x, XIN_HALT_CTRL1=%x, AXI_HALT_CTRL0=%x, AXI_HALT_CTRL1=%x\n",
+			reg_vbif_xin_halt_ctrl0, reg_vbif_xin_halt_ctrl1,
+			reg_vbif_axi_halt_ctrl0, reg_vbif_axi_halt_ctrl1);
+}
+
 void mdss_xlog(const char *name, int line, int flag, ...)
 {
 	unsigned long flags;
@@ -604,8 +646,17 @@
 		mdss_dump_dsi_debug_bus(mdss_dbg_xlog.enable_dsi_dbgbus_dump,
 			&mdss_dbg_xlog.dsi_dbgbus_dump);
 
-	if (dead && mdss_dbg_xlog.panic_on_err)
+	if (dead && mdss_dbg_xlog.panic_on_err) {
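+		/* dump VBIF state and halt XIN/AXI before panic for a stable snapshot */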
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		__dump_vbif_state();
+		__halt_vbif_xin();
+		usleep_range(10000, 10010);
+		__halt_vbif_axi();
+		usleep_range(10000, 10010);
+		__dump_vbif_state();
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
 		panic(name);
+	}
 }
 
 static void xlog_debug_work(struct work_struct *work)
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index d3e5269..b4e4bdd 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -383,6 +383,14 @@
 		ret = 0;
 	}
 
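+	/* drive the external panel VDD supply GPIO low, if one is configured */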
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio)) {
+		ret = gpio_direction_output(
+			ctrl_pdata->vdd_ext_gpio, 0);
+		if (ret)
+			pr_err("%s: unable to set dir for vdd gpio\n",
+					__func__);
+	}
+
 	if (mdss_dsi_pinctrl_set_state(ctrl_pdata, false))
 		pr_debug("reset disable: pinctrl not enabled\n");
 
@@ -410,6 +418,15 @@
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio)) {
+		ret = gpio_direction_output(
+				ctrl_pdata->vdd_ext_gpio, 1);
+		if (ret)
+			pr_err("%s: unable to set dir for vdd gpio\n",
+					__func__);
+		usleep_range(3000, 4000); /* h/w recommended delay */
+	}
+
 	ret = msm_mdss_enable_vreg(
 		ctrl_pdata->panel_power_data.vreg_config,
 		ctrl_pdata->panel_power_data.num_vreg, 1);
@@ -2751,7 +2768,6 @@
 		break;
 	case MDSS_EVENT_PANEL_OFF:
 		power_state = (int) (unsigned long) arg;
-		disable_esd_thread();
 		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
 		if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
 			rc = mdss_dsi_blank(pdata, power_state);
@@ -3264,6 +3280,7 @@
 	struct mdss_util_intf *util;
 	static int te_irq_registered;
 	struct mdss_panel_data *pdata;
+	struct mdss_panel_cfg *pan_cfg = NULL;
 
 	if (!pdev || !pdev->dev.of_node) {
 		pr_err("%s: pdev not found for DSI controller\n", __func__);
@@ -3296,6 +3313,14 @@
 		return -ENODEV;
 	}
 
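+	/* bail out of DSI probe when an SPI panel is the primary interface */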
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_SPI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		pr_debug("%s: SPI is primary\n", __func__);
+		return -ENODEV;
+	}
+
 	ctrl_pdata->mdss_util = util;
 	atomic_set(&ctrl_pdata->te_irq_ready, 0);
 
@@ -4184,6 +4209,11 @@
 			of_property_read_bool(ctrl_pdev->dev.of_node,
 				"qcom,platform-bklight-en-gpio-invert");
 
+	ctrl_pdata->vdd_ext_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,ext-vdd-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->vdd_ext_gpio))
+		pr_info("%s: ext vdd gpio not specified\n", __func__);
+
 	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
 			 "qcom,platform-reset-gpio", 0);
 	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index 5e921ff..5d2d677 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -435,6 +435,7 @@
 	int rst_gpio;
 	int disp_en_gpio;
 	int bklt_en_gpio;
+	int vdd_ext_gpio;
 	int mode_gpio;
 	int intf_mux_gpio;
 	bool bklt_en_gpio_invert;
@@ -685,6 +686,7 @@
 	u32 mask, u32 val);
 int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl);
 int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state);
+void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl);
 
 void mdss_dsi_debug_bus_init(struct mdss_dsi_data *sdata);
 
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index f425620..751a463 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -82,6 +82,8 @@
 static struct mdss_dsi_event dsi_event;
 
 static int dsi_event_thread(void *data);
+static void dsi_send_events(struct mdss_dsi_ctrl_pdata *ctrl,
+					u32 events, u32 arg);
 
 void mdss_dsi_ctrl_init(struct device *ctrl_dev,
 			struct mdss_dsi_ctrl_pdata *ctrl)
@@ -820,8 +822,10 @@
 		 * Disable PHY contention detection and receive.
 		 * Configure the strength ctrl 1 register.
 		 */
-		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
-		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+		if (ctrl0->shared_data->phy_rev != DSI_PHY_REV_12NM) {
+			MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
+			MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+		}
 
 		data0 = MIPI_INP(ctrl0->ctrl_base + 0x0004);
 		data1 = MIPI_INP(ctrl1->ctrl_base + 0x0004);
@@ -876,6 +880,18 @@
 			udelay(u_dly);
 		}
 		if (i == loop) {
+			if ((ctrl0->shared_data->phy_rev == DSI_PHY_REV_12NM) &&
+				(event == DSI_EV_LP_RX_TIMEOUT)) {
+				struct mdss_panel_info *pinfo =
+					&ctrl0->panel_data.panel_info;
+				/* If ESD is not enabled, report panel dead */
+				if (!pinfo->esd_check_enabled &&
+					ctrl0->recovery)
+					ctrl0->recovery->fxn(
+						ctrl0->recovery->data,
+						MDP_INTF_DSI_PANEL_DEAD);
+				return;
+			}
 			MDSS_XLOG(ctrl0->ndx, ln0, 0x1f1f);
 			MDSS_XLOG(ctrl1->ndx, ln1, 0x1f1f);
 			pr_err("%s: Clock lane still in stop state\n",
@@ -896,13 +912,20 @@
 		 * Enable PHY contention detection and receive.
 		 * Configure the strength ctrl 1 register.
 		 */
-		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
-		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+		if (ctrl0->shared_data->phy_rev != DSI_PHY_REV_12NM) {
+			MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
+			MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+		}
 		/*
 		 * Add sufficient delay to make sure
 		 * pixel transmission as started
 		 */
 		udelay(200);
+		/* Un-mask LP_RX_TIMEOUT error if recovery successful */
+		if (event == DSI_EV_LP_RX_TIMEOUT) {
+			mdss_dsi_set_reg(ctrl0, 0x10c, BIT(5), 0);
+			mdss_dsi_set_reg(ctrl1, 0x10c, BIT(5), 0);
+		}
 	} else {
 		if (ctrl->recovery) {
 			rc = ctrl->recovery->fxn(ctrl->recovery->data,
@@ -914,7 +937,8 @@
 			}
 		}
 		/* Disable PHY contention detection and receive */
-		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
+		if (ctrl->shared_data->phy_rev != DSI_PHY_REV_12NM)
+			MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
 
 		data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
 		/* Disable DSI video mode */
@@ -955,6 +979,17 @@
 			udelay(u_dly);
 		}
 		if (i == loop) {
+			if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_12NM) &&
+				(event == DSI_EV_LP_RX_TIMEOUT)) {
+				struct mdss_panel_info *pinfo =
+					&ctrl->panel_data.panel_info;
+				/* If ESD is not enabled, report panel dead */
+				if (!pinfo->esd_check_enabled && ctrl->recovery)
+					ctrl->recovery->fxn(
+						ctrl->recovery->data,
+						MDP_INTF_DSI_PANEL_DEAD);
+				return;
+			}
 			MDSS_XLOG(ctrl->ndx, ln0, 0x1f1f);
 			pr_err("%s: Clock lane still in stop state\n",
 					__func__);
@@ -968,12 +1003,16 @@
 		/* Enable Video mode for DSI controller */
 		MIPI_OUTP(ctrl->ctrl_base + 0x004, data0);
 		/* Enable PHY contention detection and receiver */
-		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
+		if (ctrl->shared_data->phy_rev != DSI_PHY_REV_12NM)
+			MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
 		/*
 		 * Add sufficient delay to make sure
 		 * pixel transmission as started
 		 */
 		udelay(200);
+		/* Un-mask LP_RX_TIMEOUT error if recovery successful */
+		if (event == DSI_EV_LP_RX_TIMEOUT)
+			mdss_dsi_set_reg(ctrl, 0x10c, BIT(5), 0);
 	}
 	pr_debug("Recovery done\n");
 }
@@ -1513,15 +1552,45 @@
 	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
 						DSI_BTA_EVENT_TIMEOUT);
 	if (ret <= 0) {
-		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
-		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+		u32 reg_val, status;
+
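+		/*
+		 * BTA may have completed without its interrupt firing;
+		 * check the interrupt status register directly.
+		 */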
+		reg_val = MIPI_INP(ctrl_pdata->ctrl_base + 0x0110);
+		status = reg_val & DSI_INTR_BTA_DONE;
+		if (status) {
+			reg_val &= DSI_INTR_MASK_ALL;
+			/* clear BTA_DONE isr only */
+			reg_val |= DSI_INTR_BTA_DONE;
+			MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0110, reg_val);
+			mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
+			complete(&ctrl_pdata->bta_comp);
+			ret = 1;
+			pr_warn("%s: bta done but irq not triggered\n",
+				__func__);
+		} else {
+			pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+			/*
+			 * For 12nm DSI PHY, BTA_TO interrupt may not trigger.
+			 * Treat software timer timeout as BTA_TO.
+			 */
+			if (ctrl_pdata->shared_data->phy_rev ==
+				DSI_PHY_REV_12NM) {
+				/* Mask BTA_TIMEOUT/LP_RX_TIMEOUT error */
+				mdss_dsi_set_reg(ctrl_pdata, 0x10c,
+					(BIT(5) | BIT(7)), (BIT(5) | BIT(7)));
+				dsi_send_events(ctrl_pdata,
+					DSI_EV_LP_RX_TIMEOUT, 0);
+			}
+			ret = -ETIMEDOUT;
+		}
 	}
 
 	if (ignore_underflow) {
+		u32 data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x10c);
 		/* clear pending overflow status */
 		mdss_dsi_set_reg(ctrl_pdata, 0xc, 0xffffffff, 0x44440000);
-		/* restore overflow isr */
-		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
+		/* restore overflow isr if LP_RX_TO not masked */
+		if (!(data & BIT(5)))
+			mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
 	}
 
 	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
@@ -2164,10 +2233,12 @@
 
 	if (mctrl && mctrl->dma_addr) {
 		if (ignored) {
+			u32 data = MIPI_INP((mctrl->ctrl_base) + 0x10c);
 			/* clear pending overflow status */
 			mdss_dsi_set_reg(mctrl, 0xc, 0xffffffff, 0x44440000);
-			/* restore overflow isr */
-			mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
+			/* restore overflow isr if LP_RX_TO not masked */
+			if (!(data & BIT(5)))
+				mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
 		}
 		if (mctrl->dmap_iommu_map) {
 			mdss_smmu_dsi_unmap_buffer(mctrl->dma_addr, domain,
@@ -2185,10 +2256,12 @@
 	}
 
 	if (ignored) {
+		u32 data = MIPI_INP((ctrl->ctrl_base) + 0x10c);
 		/* clear pending overflow status */
 		mdss_dsi_set_reg(ctrl, 0xc, 0xffffffff, 0x44440000);
-		/* restore overflow isr */
-		mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
+		/* restore overflow isr if LP_RX_TO/BTA_TO not masked */
+		if (!(data & BIT(5)))
+			mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
 	}
 	ctrl->dma_addr = 0;
 	ctrl->dma_size = 0;
@@ -2204,7 +2277,7 @@
 	u32 *lp, *temp, data;
 	int i, j = 0, off, cnt;
 	bool ack_error = false;
-	char reg[16];
+	char reg[16] = {0x0};
 	int repeated_bytes = 0;
 	struct mdss_dsi_ctrl_pdata *mctrl = mdss_dsi_get_other_ctrl(ctrl);
 
@@ -3024,8 +3097,12 @@
 
 	if (status & 0x0111) {
 		MIPI_OUTP(base + 0x00c0, status);
-		if (status & 0x0110)
+		if (status & 0x0110) {
+			/* Mask BTA_TIMEOUT/LP_RX_TIMEOUT error */
+			mdss_dsi_set_reg(ctrl, 0x10c,
+				(BIT(5) | BIT(7)), (BIT(5) | BIT(7)));
 			dsi_send_events(ctrl, DSI_EV_LP_RX_TIMEOUT, 0);
+		}
 		pr_err("%s: status=%x\n", __func__, status);
 		ret = true;
 	}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index e586667..cde2bc3 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 
 #include "mdss_dsi.h"
+#include "mdss_debug.h"
 #ifdef TARGET_HW_MDSS_HDMI
 #include "mdss_dba_utils.h"
 #endif
@@ -260,11 +261,12 @@
 	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 						panel_data);
 
-	pr_debug("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
+	pr_info("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
 
 	if (ctrl->idle == enable)
 		return;
 
+	MDSS_XLOG(ctrl->idle, enable);
 	if (enable) {
 		if (ctrl->idle_on_cmds.cmd_cnt) {
 			mdss_dsi_panel_cmds_send(ctrl, &ctrl->idle_on_cmds,
@@ -324,6 +326,15 @@
 			goto bklt_en_gpio_err;
 		}
 	}
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio)) {
+		rc = gpio_request(ctrl_pdata->vdd_ext_gpio,
+						"vdd_enable");
+		if (rc) {
+			pr_err("request vdd enable gpio failed, rc=%d\n",
+				rc);
+			goto vdd_en_gpio_err;
+		}
+	}
 	if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
 		rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
 		if (rc) {
@@ -335,6 +346,9 @@
 	return rc;
 
 mode_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->vdd_ext_gpio))
+		gpio_free(ctrl_pdata->vdd_ext_gpio);
+vdd_en_gpio_err:
 	if (gpio_is_valid(ctrl_pdata->bklt_en_gpio))
 		gpio_free(ctrl_pdata->bklt_en_gpio);
 bklt_en_gpio_err:
@@ -1050,6 +1064,48 @@
 	return 0;
 }
 
+static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np,
+	struct mdss_panel_info *pinfo)
+{
+	int len, rc;
+	const u32 *src;
+	u32 tmp;
+	u32 max_delay_us;
+
+	pinfo->mdp_koff_thshold = false;
+	src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len);
+	if (!src || (len == 0))
+		return;
+
+	rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp);
+	if (!rc)
+		pinfo->mdp_koff_delay = tmp;
+	else
+		return;
+
+	if (pinfo->mipi.frame_rate == 0) {
+		pr_err("cannot enable guard window, unexpected panel fps\n");
+		return;
+	}
+
+	pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]);
+	pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]);
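+	/* one frame period in microseconds bounds the kickoff delay */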
+	max_delay_us = 1000000 / pinfo->mipi.frame_rate;
+
+	/* enable the feature if threshold is valid */
+	if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) &&
+	   ((pinfo->mdp_koff_delay > 0) &&
+	    (pinfo->mdp_koff_delay < max_delay_us)))
+		pinfo->mdp_koff_thshold = true;
+
+	pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n",
+		pinfo->mdp_koff_thshold_low,
+		pinfo->mdp_koff_thshold_high,
+		pinfo->mdp_koff_delay,
+		max_delay_us,
+		pinfo->mdp_koff_thshold);
+}
+
 static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
 		char *trigger_key)
 {
@@ -2831,6 +2887,8 @@
 	rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
 	pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
 
+	mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo);
+
 	pinfo->mipi.lp11_init = of_property_read_bool(np,
 					"qcom,mdss-dsi-lp11-init");
 	rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c b/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
index 9bd2c4d..ec179e8 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
@@ -107,6 +107,7 @@
 {
 	DSI_PHY_W32(ctrl->phy_io.base, SYS_CTRL, BIT(0) | BIT(3));
 	wmb(); /* make sure DSI PHY is disabled */
+	mdss_dsi_ctrl_phy_reset(ctrl);
 	return 0;
 }
 
diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c
index c5af962..326768d 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_status.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_status.c
@@ -146,15 +146,18 @@
 		return NOTIFY_DONE;
 
 	mfd = evdata->info->par;
-	ctrl_pdata = container_of(dev_get_platdata(&mfd->pdev->dev),
+	if (mfd->panel_info->type == SPI_PANEL) {
+		pinfo = mfd->panel_info;
+	} else {
+		ctrl_pdata = container_of(dev_get_platdata(&mfd->pdev->dev),
 				struct mdss_dsi_ctrl_pdata, panel_data);
-	if (!ctrl_pdata) {
-		pr_err("%s: DSI ctrl not available\n", __func__);
-		return NOTIFY_BAD;
+		if (!ctrl_pdata) {
+			pr_err("%s: DSI ctrl not available\n", __func__);
+			return NOTIFY_BAD;
+		}
+
+		pinfo = &ctrl_pdata->panel_data.panel_info;
 	}
-
-	pinfo = &ctrl_pdata->panel_data.panel_info;
-
 	if ((!(pinfo->esd_check_enabled) &&
 			dsi_status_disable) ||
 			(dsi_status_disable == DSI_STATUS_CHECK_DISABLE)) {
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 72f651e..72adb17 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -243,9 +243,11 @@
 		}
 	} else if (notify == NOTIFY_UPDATE_STOP) {
 		mutex_lock(&mfd->update.lock);
-		if (mfd->update.init_done)
+		if (mfd->update.init_done) {
+			mutex_unlock(&mfd->update.lock);
+			mutex_lock(&mfd->no_update.lock);
 			reinit_completion(&mfd->no_update.comp);
-		else {
+		} else {
 			mutex_unlock(&mfd->update.lock);
 			pr_err("notify update stop called without init\n");
 			return -EINVAL;
@@ -306,10 +308,23 @@
 	}
 }
 
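+/* report current brightness derived from the cached backlight level */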
+static enum led_brightness mdss_fb_get_bl_brightness(
+	struct led_classdev *led_cdev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
+	u64 value;
+
+	MDSS_BL_TO_BRIGHT(value, mfd->bl_level, mfd->panel_info->bl_max,
+			  mfd->panel_info->brightness_max);
+
+	return value;
+}
+
 static struct led_classdev backlight_led = {
 	.name           = "lcd-backlight",
 	.brightness     = MDSS_MAX_BL_BRIGHTNESS / 2,
 	.brightness_set = mdss_fb_set_bl_brightness,
+	.brightness_get = mdss_fb_get_bl_brightness,
 	.max_brightness = MDSS_MAX_BL_BRIGHTNESS,
 };
 
@@ -345,6 +360,9 @@
 	case EDP_PANEL:
 		ret = snprintf(buf, PAGE_SIZE, "edp panel\n");
 		break;
+	case SPI_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "spi panel\n");
+		break;
 	default:
 		ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
 		break;
@@ -592,7 +610,8 @@
 			"white_chromaticity_x=%d\nwhite_chromaticity_y=%d\n"
 			"red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
 			"green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
-			"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n",
+			"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n"
+			"panel_orientation=%d\n",
 			pinfo->partial_update_enabled,
 			pinfo->roi_alignment.xstart_pix_align,
 			pinfo->roi_alignment.width_pix_align,
@@ -615,7 +634,8 @@
 			pinfo->hdr_properties.display_primaries[4],
 			pinfo->hdr_properties.display_primaries[5],
 			pinfo->hdr_properties.display_primaries[6],
-			pinfo->hdr_properties.display_primaries[7]);
+			pinfo->hdr_properties.display_primaries[7],
+			pinfo->panel_orientation);
 
 	return ret;
 }
@@ -885,6 +905,12 @@
 	return ret;
 }
 
+static ssize_t mdss_fb_idle_pc_notify(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "idle power collapsed\n");
+}
+
 static DEVICE_ATTR(msm_fb_type, 0444, mdss_fb_get_type, NULL);
 static DEVICE_ATTR(msm_fb_split, 0644, mdss_fb_show_split,
 					mdss_fb_store_split);
@@ -905,6 +931,8 @@
 	mdss_fb_get_fps_info, NULL);
 static DEVICE_ATTR(msm_fb_persist_mode, 0644,
 	mdss_fb_get_persist_mode, mdss_fb_change_persist_mode);
+static DEVICE_ATTR(idle_power_collapse, 0444, mdss_fb_idle_pc_notify, NULL);
+
 static struct attribute *mdss_fb_attrs[] = {
 	&dev_attr_msm_fb_type.attr,
 	&dev_attr_msm_fb_split.attr,
@@ -918,6 +946,7 @@
 	&dev_attr_msm_fb_dfps_mode.attr,
 	&dev_attr_measured_fps.attr,
 	&dev_attr_msm_fb_persist_mode.attr,
+	&dev_attr_idle_power_collapse.attr,
 	NULL,
 };
 
@@ -1214,6 +1243,7 @@
 	struct mdss_panel_data *pdata;
 	struct fb_info *fbi;
 	int rc;
+	const char *data;
 
 	if (fbi_list_index >= MAX_FBI_LIST)
 		return -ENOMEM;
@@ -1259,9 +1289,25 @@
 	mfd->fb_imgType = MDP_RGBA_8888;
 	mfd->calib_mode_bl = 0;
 	mfd->unset_bl_level = U32_MAX;
+	mfd->bl_extn_level = -1;
 
 	mfd->pdev = pdev;
 
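+	/* SPI panels default to RGB565; qcom,mdss-fb-format can override the format */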
+	if (mfd->panel.type == SPI_PANEL)
+		mfd->fb_imgType = MDP_RGB_565;
+	if (mfd->panel.type == MIPI_VIDEO_PANEL ||
+		mfd->panel.type == MIPI_CMD_PANEL ||
+		mfd->panel.type == SPI_PANEL) {
+		rc = of_property_read_string(pdev->dev.of_node,
+			"qcom,mdss-fb-format", &data);
+		if (!rc) {
+			if (!strcmp(data, "rgb888"))
+				mfd->fb_imgType = MDP_RGB_888;
+			else if (!strcmp(data, "rgb565"))
+				mfd->fb_imgType = MDP_RGB_565;
+			else
+				mfd->fb_imgType = MDP_RGBA_8888;
+		}
+	}
 	mfd->split_fb_left = mfd->split_fb_right = 0;
 
 	mdss_fb_set_split_mode(mfd, pdata);
@@ -1374,6 +1420,7 @@
 		mfd->mdp_sync_pt_data.threshold = 1;
 		mfd->mdp_sync_pt_data.retire_threshold = 0;
 		break;
+	case SPI_PANEL:
 	case MIPI_CMD_PANEL:
 		mfd->mdp_sync_pt_data.threshold = 1;
 		mfd->mdp_sync_pt_data.retire_threshold = 1;
@@ -2086,6 +2133,7 @@
 	}
 
 	ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
+	MDSS_XLOG(blank_mode);
 
 end:
 	mutex_unlock(&mfd->mdss_sysfs_lock);
@@ -2843,7 +2891,9 @@
 		 * enabling ahead of unblank. for some special cases like
 		 * adb shell stop/start.
 		 */
+		mutex_lock(&mfd->bl_lock);
 		mdss_fb_set_backlight(mfd, 0);
+		mutex_unlock(&mfd->bl_lock);
 
 		ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
 			mfd->op_enable);
@@ -3440,6 +3490,15 @@
 	mfd->msm_fb_backup.atomic_commit = true;
 	mfd->msm_fb_backup.disp_commit.l_roi =  commit_v1->left_roi;
 	mfd->msm_fb_backup.disp_commit.r_roi =  commit_v1->right_roi;
+	mfd->msm_fb_backup.disp_commit.flags =  commit_v1->flags;
+	if (commit_v1->flags & MDP_COMMIT_UPDATE_BRIGHTNESS) {
+		MDSS_BRIGHT_TO_BL(mfd->bl_extn_level, commit_v1->bl_level,
+			mfd->panel_info->bl_max,
+			mfd->panel_info->brightness_max);
+		if (!mfd->bl_extn_level && commit_v1->bl_level)
+			mfd->bl_extn_level = 1;
+	} else {
+		mfd->bl_extn_level = -1;
+	}
 
 	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
 	atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
@@ -4529,6 +4588,9 @@
 		 * In case of an ESD attack, since we early return from the
 		 * commits, we need to signal the outstanding fences.
 		 */
+		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+		atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
+		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
 		mdss_fb_release_fences(mfd);
 		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
 			mfd->mdp.signal_retire_fence && mdp5_data)
@@ -5148,3 +5210,18 @@
 		mfd->fps_info.frame_count = 0;
 	}
 }
+
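+/* notify userspace via sysfs when a command mode panel enters idle power collapse */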
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd || mdss_fb_is_power_off(mfd))
+		return;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if ((mfd->panel_info->type == MIPI_CMD_PANEL) && mdp5_data) {
+		pr_debug("Notify fb%d idle power collapsed\n", mfd->index);
+		sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_power_collapse");
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index 5f2baef..3515e37 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -245,6 +245,10 @@
 				out = (2 * (v) * (bl_max) + max_bright);\
 				do_div(out, 2 * max_bright);\
 				} while (0)
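+/* inverse of MDSS_BRIGHT_TO_BL: convert a backlight level back to brightness */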
+#define MDSS_BL_TO_BRIGHT(out, v, bl_max, max_bright) do {\
+				out = ((v) * (max_bright));\
+				do_div(out, bl_max);\
+				} while (0)
 
 struct mdss_fb_file_info {
 	struct file *file;
@@ -309,6 +313,7 @@
 	u32 calib_mode_bl;
 	u32 ad_bl_level;
 	u64 bl_level;
+	u64 bl_extn_level;
 	u32 bl_scale;
 	u32 bl_min_lvl;
 	u32 unset_bl_level;
@@ -473,4 +478,5 @@
 void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
 						struct fb_var_screeninfo *var);
 void mdss_fb_calc_fps(struct msm_fb_data_type *mfd);
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd);
 #endif /* MDSS_FB_H */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index 8dce151..98f4c658 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -666,7 +666,7 @@
 
 static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
 {
-	int rc = 0, timeout_hsync;
+	int timeout_hsync = 0, rc = 0;
 	char *recvd_msg_buf = NULL;
 	struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
 	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
@@ -1077,7 +1077,7 @@
 
 static bool hdmi_hdcp2p2_supported(struct hdmi_hdcp2p2_ctrl *ctrl)
 {
-	u8 hdcp2version;
+	u8 hdcp2version = 0;
 
 	int rc = hdmi_hdcp2p2_read_version(ctrl, &hdcp2version);
 
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 4f2bb09..42e2181 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -70,8 +70,10 @@
 #define HDMI_TX_3_MAX_PCLK_RATE            297000
 #define HDMI_TX_4_MAX_PCLK_RATE            600000
 
-#define hdmi_tx_get_fd(x) (x ? hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
-#define hdmi_tx_set_fd(x, y) {if (x) hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
+#define hdmi_tx_get_fd(x) ((x && (ffs(x) > 0)) ? \
+			hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
+#define hdmi_tx_set_fd(x, y) {if (x && (ffs(x) > 0)) \
+			hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
 
 #define MAX_EDID_READ_RETRY	5
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index a6d43a6..24f7521 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -811,7 +811,7 @@
 
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
 {
-	int irq_idx;
+	int irq_idx = 0;
 	unsigned long irq_flags;
 	int ret = 0;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -830,7 +830,7 @@
 	spin_lock_irqsave(&mdp_lock, irq_flags);
 	if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
 		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
-			irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
 		ret = -EBUSY;
 	} else {
 		pr_debug("MDP IRQ mask old=%x new=%x\n",
@@ -1129,12 +1129,31 @@
 {
 	int ret = -ENODEV;
 	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+	struct mdss_data_type *mdata = mdss_res;
 
 	if (clk) {
 		pr_debug("clk=%d en=%d\n", clk_idx, enable);
 		if (enable) {
 			if (clk_idx == MDSS_CLK_MDP_VSYNC)
 				clk_set_rate(clk, 19200000);
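+			/* with the CLK_SET_RATE quirk, program the core clk rate before enabling it */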
+			if (mdss_has_quirk(mdata, MDSS_QUIRK_MDP_CLK_SET_RATE)
+					&& (clk_idx == MDSS_CLK_MDP_CORE)) {
+
+				if (WARN_ON(!mdata->mdp_clk_rate)) {
+					/*
+					 * rate should have been set in probe
+					 * or during clk scaling; but if this
+					 * is not the case, set max clk rate.
+					 */
+					pr_warn("set max mdp clk rate:%u\n",
+						mdata->max_mdp_clk_rate);
+					mdss_mdp_set_clk_rate(
+						mdata->max_mdp_clk_rate, true);
+				} else {
+					clk_set_rate(clk, mdata->mdp_clk_rate);
+				}
+			}
+
 			ret = clk_prepare_enable(clk);
 		} else {
 			clk_disable_unprepare(clk);
@@ -1163,7 +1182,7 @@
 	return ret;
 }
 
-void mdss_mdp_set_clk_rate(unsigned long rate)
+void mdss_mdp_set_clk_rate(unsigned long rate, bool locked)
 {
 	struct mdss_data_type *mdata = mdss_res;
 	unsigned long clk_rate;
@@ -1173,7 +1192,9 @@
 	min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
 
 	if (clk) {
-		mutex_lock(&mdp_clk_lock);
+		if (!locked)
+			mutex_lock(&mdp_clk_lock);
 		if (min_clk_rate < mdata->max_mdp_clk_rate)
 			clk_rate = clk_round_rate(clk, min_clk_rate);
 		else
@@ -1181,13 +1202,15 @@
 		if (IS_ERR_VALUE(clk_rate)) {
 			pr_err("unable to round rate err=%ld\n", clk_rate);
 		} else if (clk_rate != clk_get_rate(clk)) {
-			if (IS_ERR_VALUE((unsigned long)
-					 clk_set_rate(clk, clk_rate)))
+			mdata->mdp_clk_rate = clk_rate;
+			if (IS_ERR_VALUE(
+				(unsigned long)clk_set_rate(clk, clk_rate)))
 				pr_err("clk_set_rate failed\n");
 			else
 				pr_debug("mdp clk rate=%lu\n", clk_rate);
 		}
-		mutex_unlock(&mdp_clk_lock);
+		if (!locked)
+			mutex_unlock(&mdp_clk_lock);
 	} else {
 		pr_err("mdp src clk not setup properly\n");
 	}
@@ -1285,6 +1308,68 @@
 	}
 }
 
+/*
+ * __mdss_mdp_clk_control - Overall MDSS clock control for power on/off
+ */
+static void __mdss_mdp_clk_control(struct mdss_data_type *mdata, bool enable)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (enable) {
+		pm_runtime_get_sync(&mdata->pdev->dev);
+
+		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+			VOTE_INDEX_LOW);
+
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE((unsigned long)rc))
+			pr_err("IOMMU attach failed\n");
+
+		/* Active+Sleep */
+		msm_bus_scale_client_update_context(mdata->bus_hdl,
+			false, mdata->curr_bw_uc_idx);
+
+		spin_lock_irqsave(&mdp_lock, flags);
+		mdata->clk_ena = enable;
+		spin_unlock_irqrestore(&mdp_lock, flags);
+
+		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 1);
+		mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 1);
+		if (mdata->vsync_ena)
+			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 1);
+	} else {
+		spin_lock_irqsave(&mdp_lock, flags);
+		mdata->clk_ena = enable;
+		spin_unlock_irqrestore(&mdp_lock, flags);
+
+		if (mdata->vsync_ena)
+			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 0);
+
+		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 0);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
+		mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
+		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 0);
+
+		/* release iommu control */
+		mdss_iommu_ctrl(0);
+
+		/* Active-Only */
+		msm_bus_scale_client_update_context(mdata->bus_hdl,
+			true, mdata->ao_bw_uc_idx);
+
+		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+			VOTE_INDEX_DISABLE);
+
+		pm_runtime_mark_last_busy(&mdata->pdev->dev);
+		pm_runtime_put_autosuspend(&mdata->pdev->dev);
+	}
+}
+
 int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
 {
 	int rc = 0;
@@ -1382,10 +1467,17 @@
 		return mdata->iommu_ref_cnt;
 }
 
-static void mdss_mdp_memory_retention_enter(void)
+#define MEM_RETAIN_ON 1
+#define MEM_RETAIN_OFF 0
+#define PERIPH_RETAIN_ON 1
+#define PERIPH_RETAIN_OFF 0
+
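+/* apply memory/peripheral retention flags to the MDP core clock (and LUT clock, when present) */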
+static void mdss_mdp_memory_retention_ctrl(bool mem_ctrl, bool periph_ctrl)
 {
 	struct clk *mdss_mdp_clk = NULL;
 	struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct clk *mdss_mdp_lut_clk = NULL;
 
 	if (mdp_vote_clk) {
 		mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
@@ -1395,21 +1487,40 @@
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
 		}
 	}
-}
 
-static void mdss_mdp_memory_retention_exit(void)
-{
-	struct clk *mdss_mdp_clk = NULL;
-	struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
-
-	if (mdp_vote_clk) {
-		mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
-		if (mdss_mdp_clk) {
+	__mdss_mdp_reg_access_clk_enable(mdata, true);
+	if (mdss_mdp_clk) {
+		if (mem_ctrl)
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_MEM);
+
+		if (periph_ctrl) {
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
 			clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
+		} else {
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
 		}
 	}
+
+	if (mdss_mdp_lut_clk) {
+		if (mem_ctrl)
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_NORETAIN_MEM);
+
+		if (periph_ctrl) {
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_PERIPH);
+			clk_set_flags(mdss_mdp_lut_clk,
+				CLKFLAG_PERIPH_OFF_CLEAR);
+		} else {
+			clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_SET);
+			clk_set_flags(mdss_mdp_lut_clk,
+				CLKFLAG_NORETAIN_PERIPH);
+		}
+	}
+	__mdss_mdp_reg_access_clk_enable(mdata, false);
 }
 
 /**
@@ -1440,17 +1551,21 @@
 	mdss_hw_init(mdata);
 	mdss_iommu_ctrl(0);
 
-	/**
-	 * sleep 10 microseconds to make sure AD auto-reinitialization
-	 * is done
-	 */
-	udelay(10);
-	mdss_mdp_memory_retention_exit();
-
 	mdss_mdp_ctl_restore(true);
 	mdata->idle_pc = false;
 
 end:
+	if (mdata->mem_retain) {
+		/**
+		 * sleep 10 microseconds to make sure AD auto-reinitialization
+		 * is done
+		 */
+		udelay(10);
+		mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
+			PERIPH_RETAIN_ON);
+		mdata->mem_retain = false;
+	}
+
 	mutex_unlock(&mdp_fs_idle_pc_lock);
 	return rc;
 }
@@ -1543,9 +1658,7 @@
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	static int mdp_clk_cnt;
-	unsigned long flags;
 	int changed = 0;
-	int rc = 0;
 
 	mutex_lock(&mdp_clk_lock);
 	if (enable) {
@@ -1569,49 +1682,8 @@
 		__builtin_return_address(0), current->group_leader->comm,
 		mdata->bus_ref_cnt, changed, enable);
 
-	if (changed) {
-		if (enable) {
-			pm_runtime_get_sync(&mdata->pdev->dev);
-
-			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
-				VOTE_INDEX_LOW);
-
-			rc = mdss_iommu_ctrl(1);
-			if (IS_ERR_VALUE((unsigned long)rc))
-				pr_err("IOMMU attach failed\n");
-
-			/* Active+Sleep */
-			msm_bus_scale_client_update_context(mdata->bus_hdl,
-				false, mdata->curr_bw_uc_idx);
-		}
-
-		spin_lock_irqsave(&mdp_lock, flags);
-		mdata->clk_ena = enable;
-		spin_unlock_irqrestore(&mdp_lock, flags);
-
-		mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, enable);
-		mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
-		mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
-		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
-		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
-		if (mdata->vsync_ena)
-			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
-
-		if (!enable) {
-			/* release iommu control */
-			mdss_iommu_ctrl(0);
-
-			/* Active-Only */
-			msm_bus_scale_client_update_context(mdata->bus_hdl,
-				true, mdata->ao_bw_uc_idx);
-
-			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
-				VOTE_INDEX_DISABLE);
-
-			pm_runtime_mark_last_busy(&mdata->pdev->dev);
-			pm_runtime_put_autosuspend(&mdata->pdev->dev);
-		}
-	}
+	if (changed)
+		__mdss_mdp_clk_control(mdata, enable);
 
 	if (enable && changed)
 		mdss_mdp_idle_pc_restore();
@@ -1757,7 +1829,7 @@
 	mdss_mdp_irq_clk_register(mdata, "mnoc_clk", MDSS_CLK_MNOC_AHB);
 
 	/* Setting the default clock rate to the max supported.*/
-	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
+	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate, false);
 	pr_debug("mdp clk rate=%ld\n",
 		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));
 
@@ -1867,7 +1939,8 @@
 		mdata->pixel_ram_size = 50 * 1024;
 		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
-		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
+		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
+		mdata->enable_cdp = true; /* enable cdp */
 		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
@@ -1961,7 +2034,8 @@
 		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
-		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
+		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
+		mdata->enable_cdp = false; /* disable cdp */
 		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
 		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
@@ -1978,6 +2052,7 @@
 		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
 		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
 		mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
+		mdss_set_quirk(mdata, MDSS_QUIRK_MDP_CLK_SET_RATE);
 		mdata->has_wb_ubwc = true;
 		set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
 		break;
@@ -2340,6 +2415,8 @@
 	size_t len = PAGE_SIZE;
 	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
 
+	if (!pipe)
+		return;
 #define SPRINT(fmt, ...) \
 		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
 
@@ -2824,11 +2901,9 @@
 		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
 	mdata->splash_intf_sel = intf_sel;
 	mdata->splash_split_disp = split_display;
-
 	if (intf_sel != 0) {
 		for (i = 0; i < 4; i++)
-			if ((intf_sel >> i*8) & 0x000000FF)
-				num_of_display_on++;
+			num_of_display_on += ((intf_sel >> i*8) & 0x000000FF);
 
 		/*
 		 * For split display enabled - DSI0, DSI1 interfaces are
@@ -3918,6 +3993,7 @@
 static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
 {
 	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 npriority_lvl_nrt;
 	int rc;
 
 	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
@@ -3941,8 +4017,20 @@
 		return;
 	}
 
-	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+	npriority_lvl_nrt = mdss_mdp_parse_dt_prop_len(pdev,
 			"qcom,mdss-vbif-qos-nrt-setting");
+
+	if (!npriority_lvl_nrt) {
+		pr_debug("no vbif nrt priorities found rt:%d\n",
+			mdata->npriority_lvl);
+		return;
+	} else if (npriority_lvl_nrt != mdata->npriority_lvl) {
+		/* driver expects same number for both nrt and rt */
+		pr_err("invalid nrt settings nrt(%d) != rt(%d)\n",
+			npriority_lvl_nrt, mdata->npriority_lvl);
+		return;
+	}
+
 	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
 		mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
 					      sizeof(u32), GFP_KERNEL);
@@ -3958,7 +4046,7 @@
 		}
 	} else {
 		mdata->npriority_lvl = 0;
-		pr_debug("Invalid or no vbif qos nrt seting\n");
+		pr_debug("Invalid or no vbif qos nrt setting\n");
 	}
 }
 
@@ -4183,6 +4271,15 @@
 	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
 		&mdata->clk_factor);
 
+	/*
+	 * Bus throughput factor will be used during high downscale cases.
+	 * The recommended default factor is 1.1.
+	 */
+	mdata->bus_throughput_factor.numer = 11;
+	mdata->bus_throughput_factor.denom = 10;
+	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-bus-througput-factor",
+		&mdata->bus_throughput_factor);
+
 	rc = of_property_read_u32(pdev->dev.of_node,
 			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
 	if (rc)
@@ -4821,6 +4918,22 @@
 }
 
 /**
+ * mdss_mdp_notify_idle_pc() - Notify fb driver of idle power collapse
+ * @mdata: MDP private data
+ *
+ * This function is called if there are active overlays.
+ */
+static void mdss_mdp_notify_idle_pc(struct mdss_data_type *mdata)
+{
+	int i;
+
+	for (i = 0; i < mdata->nctl; i++)
+		if ((mdata->ctl_off[i].ref_cnt) &&
+			!mdss_mdp_ctl_is_power_off(&mdata->ctl_off[i]))
+			mdss_fb_idle_pc(mdata->ctl_off[i].mfd);
+}
+
+/**
  * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
  * @mdata: MDP private data
  * @on: 1 to turn on footswitch, 0 to turn off footswitch
@@ -4870,14 +4983,21 @@
 				 * Turning off GDSC while overlays are still
 				 * active.
 				 */
+
+				mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
+					PERIPH_RETAIN_OFF);
 				mdata->idle_pc = true;
+				mdss_mdp_notify_idle_pc(mdata);
 				pr_debug("idle pc. active overlays=%d\n",
 					active_cnt);
-				mdss_mdp_memory_retention_enter();
 			} else {
 				mdss_mdp_cx_ctrl(mdata, false);
 				mdss_mdp_batfet_ctrl(mdata, false);
+				mdss_mdp_memory_retention_ctrl(
+					MEM_RETAIN_OFF,
+					PERIPH_RETAIN_OFF);
 			}
+			mdata->mem_retain = true;
 			if (mdata->en_svs_high)
 				mdss_mdp_config_cx_voltage(mdata, false);
 			regulator_disable(mdata->fs);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 9497318..78ee489 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -350,6 +350,9 @@
 
 	/* to update lineptr, [1..yres] - enable, 0 - disable */
 	int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
+
+	/* to wait for vsync */
+	int (*wait_for_vsync_fnc)(struct mdss_mdp_ctl *ctl);
 };
 
 /* FRC info used for Deterministic Frame Rate Control */
@@ -648,6 +651,7 @@
 
 	/* vsync handler for FRC */
 	struct mdss_mdp_vsync_handler frc_vsync_handler;
+	bool need_vsync_on;
 };
 
 struct mdss_mdp_mixer {
@@ -664,6 +668,8 @@
 	bool valid_roi;
 	bool roi_changed;
 	struct mdss_rect roi;
+	bool dsc_enabled;
+	bool dsc_merge_enabled;
 
 	u8 cursor_enabled;
 	u16 cursor_hotx;
@@ -738,6 +744,7 @@
 	struct dma_buf *srcp_dma_buf;
 	struct dma_buf_attachment *srcp_attachment;
 	struct sg_table *srcp_table;
+	struct ion_handle *ihandle;
 };
 
 enum mdss_mdp_data_state {
@@ -1026,6 +1033,8 @@
 	struct kthread_worker worker;
 	struct kthread_work vsync_work;
 	struct task_struct *thread;
+
+	bool cache_null_commit; /* Cache if preceding commit was NULL */
 };
 
 struct mdss_mdp_set_ot_params {
@@ -1620,7 +1629,7 @@
 
 void mdss_mdp_footswitch_ctrl_splash(int on);
 void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable);
-void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate, bool locked);
 unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked);
 int mdss_mdp_vsync_clk_enable(int enable, bool locked);
 void mdss_mdp_clk_ctrl(int enable);
@@ -1730,7 +1739,7 @@
 void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
 	struct notifier_block *notifier);
 u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl);
-u32 apply_comp_ratio_factor(u32 quota, struct mdss_mdp_format_params *fmt,
+u64 apply_comp_ratio_factor(u64 quota, struct mdss_mdp_format_params *fmt,
 	struct mult_factor *factor);
 
 int mdss_mdp_scan_pipes(void);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.c b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
index ab680f5..3d1be10 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_cdm.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -378,6 +378,17 @@
 		return -EINVAL;
 	}
 
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mutex_lock(&cdm->lock);
+	/* Disable HDMI packer */
+	writel_relaxed(0x0, cdm->base + MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE);
+
+	/* Put CDM in bypass */
+	writel_relaxed(0x0, cdm->mdata->mdp_base + MDSS_MDP_MDP_OUT_CTL_0);
+
+	mutex_unlock(&cdm->lock);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
 	kref_put(&cdm->kref, mdss_mdp_cdm_free);
 
 	return rc;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 9de02d9..4cfb48d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -31,6 +31,7 @@
 #define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
 #define NUM_MIXERCFG_REGS 3
 #define MDSS_MDP_WB_OUTPUT_BPP	3
+#define MIN_BUS_THROUGHPUT_SCALE_FACTOR 35
 struct mdss_mdp_mixer_cfg {
 	u32 config_masks[NUM_MIXERCFG_REGS];
 	bool border_enabled;
@@ -608,11 +609,21 @@
 }
 
 static inline bool __is_vert_downscaling(u32 src_h,
-	struct mdss_rect dst){
-
+	struct mdss_rect dst)
+{
 	return (src_h > dst.h);
 }
 
+static inline bool __is_bus_throughput_factor_required(u32 src_h,
+	struct mdss_rect dst)
+{
+	u64 scale_factor = src_h * 10;
+
+	do_div(scale_factor, dst.h);
+	return (__is_vert_downscaling(src_h, dst) &&
+		(scale_factor >= MIN_BUS_THROUGHPUT_SCALE_FACTOR));
+}
+
 static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
 	struct mdss_rect src, struct mdss_rect dst,
 	u32 fps, u32 v_total, u32 flags)
@@ -659,6 +670,15 @@
 		}
 	}
 
+	/*
+	 * If the downscale factor is >= 3.5 for a 32 BPP surface,
+	 * it is recommended to add a 10% bus throughput factor to
+	 * the clock rate.
+	 */
+	if ((pipe->src_fmt->bpp == 4) &&
+		__is_bus_throughput_factor_required(src_h, dst))
+		rate = apply_fudge_factor(rate, &mdata->bus_throughput_factor);
+
 	if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
 		rate = mdss_mdp_clk_fudge_factor(mixer, rate);
 
@@ -816,7 +836,7 @@
 	return factor->numer && factor->denom;
 }
 
-u32 apply_comp_ratio_factor(u32 quota,
+u64 apply_comp_ratio_factor(u64 quota,
 	struct mdss_mdp_format_params *fmt,
 	struct mult_factor *factor)
 {
@@ -907,7 +927,7 @@
 
 static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
 {
-	u32 vbp_min = 0;
+	u32 vbp_min = UINT_MAX;
 	int i;
 	struct mdss_data_type *mdata;
 
@@ -929,6 +949,9 @@
 		}
 	}
 
+	if (vbp_min == UINT_MAX)
+		vbp_min = 0;
+
 	return vbp_min;
 }
 
@@ -2055,12 +2078,11 @@
 	return bw_sum_of_intfs;
 }
 
-static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
+/* apply any adjustments to the ib quota */
+static inline u64 __calc_bus_ib_quota(struct mdss_data_type *mdata,
 	struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
 {
-	u64 bus_ab_quota, bus_ib_quota;
-
-	bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
+	u64 bus_ib_quota;
 
 	if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map)) {
 		if (!nrt_client)
@@ -2087,6 +2109,18 @@
 		bus_ib_quota = apply_fudge_factor(bus_ib_quota,
 			&mdata->per_pipe_ib_factor);
 
+	return bus_ib_quota;
+}
+
+static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
+	struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
+{
+	u64 bus_ab_quota, bus_ib_quota;
+
+	bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
+	bus_ib_quota = __calc_bus_ib_quota(mdata, perf, nrt_client, bw_vote);
+
 	bus_ab_quota = apply_fudge_factor(bus_ab_quota, &mdss_res->ab_factor);
 	ATRACE_INT("bus_quota", bus_ib_quota);
 
@@ -2252,6 +2286,48 @@
 	return false;
 }
 
+static bool __mdss_mdp_compare_bw(
+	struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_perf_params *new_perf,
+	struct mdss_mdp_perf_params *old_perf,
+	bool params_changed,
+	bool stop_req)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	bool is_nrt = mdss_mdp_is_nrt_ctl_path(ctl);
+	u64 new_ib =
+		__calc_bus_ib_quota(mdata, new_perf, is_nrt, new_perf->bw_ctl);
+	u64 old_ib =
+		__calc_bus_ib_quota(mdata, old_perf, is_nrt, old_perf->bw_ctl);
+	u64 new_ab = new_perf->bw_ctl;
+	u64 old_ab = old_perf->bw_ctl;
+	bool update_bw = false;
+
+	/*
+	 * three cases for bus bandwidth update.
+	 * 1. new bandwidth vote (ab or ib) or writeback output vote
+	 *		are higher than current vote for update request.
+	 * 2. new bandwidth vote or writeback output vote are
+	 *		lower than current vote at end of commit or stop.
+	 * 3. end of writeback/rotator session - last chance to
+	 *		non-realtime remove vote.
+	 */
+	if ((params_changed &&
+			(((new_ib > old_ib) || (new_ab > old_ab)) ||
+			(new_perf->bw_writeback > old_perf->bw_writeback))) ||
+			(!params_changed &&
+			(((new_ib < old_ib) || (new_ab < old_ab)) ||
+			(new_perf->bw_writeback < old_perf->bw_writeback))) ||
+			(stop_req && is_nrt))
+		update_bw = true;
+
+	trace_mdp_compare_bw(new_perf->bw_ctl, new_ib, new_perf->bw_writeback,
+		old_perf->bw_ctl, old_ib, old_perf->bw_writeback,
+		params_changed, update_bw);
+
+	return update_bw;
+}
+
 static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
 		int params_changed, bool stop_req)
 {
@@ -2287,20 +2363,8 @@
 		else if (is_bw_released || params_changed)
 			mdss_mdp_perf_calc_ctl(ctl, new);
 
-		/*
-		 * three cases for bus bandwidth update.
-		 * 1. new bandwidth vote or writeback output vote
-		 *    are higher than current vote for update request.
-		 * 2. new bandwidth vote or writeback output vote are
-		 *    lower than current vote at end of commit or stop.
-		 * 3. end of writeback/rotator session - last chance to
-		 *    non-realtime remove vote.
-		 */
-		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
-			(new->bw_writeback > old->bw_writeback))) ||
-		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
-			(new->bw_writeback < old->bw_writeback))) ||
-			(stop_req && mdss_mdp_is_nrt_ctl_path(ctl))) {
+		if (__mdss_mdp_compare_bw(ctl, new, old, params_changed,
+				stop_req)) {
 
 			pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n",
 				ctl->num, params_changed, new->bw_ctl,
@@ -2355,7 +2419,7 @@
 	 */
 	if (update_clk) {
 		ATRACE_INT("mdp_clk", clk_rate);
-		mdss_mdp_set_clk_rate(clk_rate);
+		mdss_mdp_set_clk_rate(clk_rate, false);
 		pr_debug("update clk rate = %d HZ\n", clk_rate);
 	}
 
@@ -2454,6 +2518,7 @@
 	u32 nmixers_wb;
 	u32 i;
 	u32 nmixers;
+	u32 nmixers_active;
 	struct mdss_mdp_mixer *mixer_pool = NULL;
 
 	if (!ctl || !ctl->mdata)
@@ -2467,10 +2532,21 @@
 	case MDSS_MDP_MIXER_TYPE_INTF:
 		mixer_pool = ctl->mdata->mixer_intf;
 		nmixers = nmixers_intf;
+		nmixers_active = nmixers;
+
+		for (i = 0; i < nmixers; i++) {
+			mixer = mixer_pool + i;
+			if (mixer->ref_cnt)
+				nmixers_active--;
+		}
+		mixer = NULL;
 
 		/*
 		 * try to reserve first layer mixer for write back if
-		 * assertive display needs to be supported through wfd
+		 * assertive display needs to be supported through wfd.
+		 * For external (pluggable) displays and writeback, avoid
+		 * allocating mixers LM0 and LM1, which are assigned to the
+		 * primary display first.
 		 */
 		if (ctl->mdata->has_wb_ad && ctl->intf_num &&
 			((ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
@@ -2482,6 +2558,10 @@
 			&& (ctl->mdata->ndspp < nmixers)) {
 			mixer_pool += ctl->mdata->ndspp;
 			nmixers -= ctl->mdata->ndspp;
+		} else if ((ctl->panel_data->panel_info.is_pluggable) &&
+				nmixers_active) {
+			mixer_pool += ctl->mdata->ndspp;
+			nmixers -= ctl->mdata->ndspp;
 		}
 		break;
 
@@ -2695,10 +2775,7 @@
 	if (!clk_period)
 		return -EINVAL;
 
-	time_of_line = (pinfo->lcdc.h_back_porch +
-		 pinfo->lcdc.h_front_porch +
-		 pinfo->lcdc.h_pulse_width +
-		 pinfo->xres) * clk_period;
+	time_of_line = mdss_panel_get_htotal(pinfo, true) * clk_period;
 
 	time_of_line /= 1000;	/* in nano second */
 	if (!time_of_line)
@@ -2706,10 +2783,7 @@
 
 	current_line = ctl->ops.read_line_cnt_fnc(ctl);
 
-	total_line = pinfo->lcdc.v_back_porch +
-		pinfo->lcdc.v_front_porch +
-		pinfo->lcdc.v_pulse_width +
-		pinfo->yres;
+	total_line = mdss_panel_get_vtotal(pinfo);
 
 	if (current_line >= total_line)
 		time_to_vsync = time_of_line * total_line;
@@ -2835,6 +2909,7 @@
 {
 	mdss_mdp_pingpong_write(mixer->pingpong_base,
 			MDSS_MDP_REG_PP_DSC_MODE, 1);
+	mixer->dsc_enabled = true;
 }
 
 static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
@@ -2854,6 +2929,13 @@
 		return;
 	}
 	writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+	mixer->dsc_enabled = false;
+	mixer->dsc_merge_enabled = false;
+}
+
+static bool __is_dsc_merge_enabled(u32 common_mode)
+{
+	return common_mode & BIT(1);
 }
 
 static void __dsc_config(struct mdss_mdp_mixer *mixer,
@@ -2866,6 +2948,7 @@
 	u32 initial_lines = dsc->initial_lines;
 	bool is_cmd_mode = !(mode & BIT(2));
 
+	mixer->dsc_merge_enabled = __is_dsc_merge_enabled(mode);
 	data = mdss_mdp_pingpong_read(mixer->pingpong_base,
 			MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
 	data |= BIT(18); /* endian flip */
@@ -3020,11 +3103,6 @@
 	}
 }
 
-static bool __is_dsc_merge_enabled(u32 common_mode)
-{
-	return common_mode & BIT(1);
-}
-
 static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
 	struct mdss_panel_info *pinfo)
 {
@@ -3366,8 +3444,19 @@
 	struct mdss_mdp_ctl *sctl;
 	struct mdss_panel_info *spinfo;
 
-	if (!is_dsc_compression(pinfo))
+	/*
+	 * Check for dynamic resolution switch from DSC On to DSC Off
+	 * and disable DSC
+	 */
+	if ((ctl->pending_mode_switch == SWITCH_RESOLUTION) &&
+	    ctl->is_master &&
+	    (!is_dsc_compression(pinfo))) {
+		if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+			__dsc_disable(ctl->mixer_left);
+		if (ctl->mixer_right && ctl->mixer_right->dsc_enabled)
+			__dsc_disable(ctl->mixer_right);
 		return;
+	}
 
 	if (!ctl->is_master) {
 		pr_debug("skip slave ctl because master will program for both\n");
@@ -3653,6 +3742,30 @@
 			ctl->mixer_right->width = ctl->width / 2;
 			ctl->mixer_right->height = ctl->height;
 		}
+
+		/*
+		 * If we are transitioning from DSC On + DSC Merge to DSC Off,
+		 * the 3D mux needs to be enabled
+		 */
+		if (!is_dsc_compression(&pdata->panel_info) &&
+		    ctl->mixer_left &&
+		    ctl->mixer_left->dsc_enabled &&
+		    ctl->mixer_left->dsc_merge_enabled) {
+			ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+		}
+
+		/*
+		 * If we are transitioning from DSC Off to DSC On + DSC Merge
+		 * the 3D mux needs to be disabled
+		 */
+		if (is_dsc_compression(&pdata->panel_info) &&
+		    ctl->mixer_left &&
+		    !ctl->mixer_left->dsc_enabled &&
+		    pdata->panel_info.dsc_enc_total != 1) {
+			ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				  MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+		}
 	} else {
 		/*
 		 * Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
@@ -3667,7 +3780,6 @@
 
 	ctl->border_x_off = pdata->panel_info.lcdc.border_left;
 	ctl->border_y_off = pdata->panel_info.lcdc.border_top;
-
 	return ret;
 }
 
@@ -5577,10 +5689,10 @@
 				sctl->flush_bits = 0;
 				sctl_flush_bits = 0;
 			} else {
-				sctl_flush_bits = sctl->flush_bits;
+				sctl_flush_bits |= sctl->flush_bits;
 			}
 		}
-		ctl_flush_bits = ctl->flush_bits;
+		ctl_flush_bits |= ctl->flush_bits;
 		mutex_unlock(&ctl->flush_lock);
 	}
 	/*
@@ -5670,6 +5782,10 @@
 		mdss_mdp_bwcpanic_ctrl(mdata, true);
 
 	ATRACE_BEGIN("flush_kickoff");
+
+	MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
+		mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH), split_lm_valid);
+
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
 	if (sctl && sctl_flush_bits) {
 		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
@@ -5703,6 +5819,18 @@
 	if (ret)
 		pr_warn("ctl %d error displaying frame\n", ctl->num);
 
+	/* update backlight in commit */
+	if (ctl->intf_type == MDSS_INTF_DSI && !ctl->is_video_mode &&
+	    ctl->mfd && ctl->mfd->bl_extn_level > 0) {
+		if (!IS_CALIB_MODE_BL(ctl->mfd) && (!ctl->mfd->ext_bl_ctrl ||
+						!ctl->mfd->bl_level)) {
+			mutex_lock(&ctl->mfd->bl_lock);
+			mdss_fb_set_backlight(ctl->mfd,
+					      ctl->mfd->bl_extn_level);
+			mutex_unlock(&ctl->mfd->bl_lock);
+		}
+	}
+
 	ctl->play_cnt++;
 	ATRACE_END("flush_kickoff");
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 6024ea1..bd4ee23 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -955,6 +955,11 @@
 	mdata->dbg_bus_size = 0;
 
 	switch (mdata->mdp_rev) {
+	case MDSS_MDP_HW_REV_114:
+	case MDSS_MDP_HW_REV_116:
+		mdata->dbg_bus = dbg_bus_8996;
+		mdata->dbg_bus_size = ARRAY_SIZE(dbg_bus_8996);
+		break;
 	case MDSS_MDP_HW_REV_107:
 	case MDSS_MDP_HW_REV_107_1:
 	case MDSS_MDP_HW_REV_107_2:
@@ -1061,12 +1066,14 @@
 	seq_puts(s, "\n");
 }
 
-static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
+static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe,
+		struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_data *buf;
 	int format;
 	int smps[4];
 	int i;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 
 	seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%08x play_cnt=%u xin_id=%d\n",
 			pipe->num, mdss_mdp_pipetype2str(pipe->type),
@@ -1121,11 +1128,14 @@
 
 	seq_puts(s, "Data:\n");
 
+	mutex_lock(&mdp5_data->list_lock);
 	list_for_each_entry(buf, &pipe->buf_queue, pipe_list)
 		__print_buf(s, buf, false);
+	mutex_unlock(&mdp5_data->list_lock);
 }
 
-static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
+static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer,
+		struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_pipe *pipe;
 	int i, cnt = 0;
@@ -1142,7 +1152,7 @@
 	for (i = 0; i < ARRAY_SIZE(mixer->stage_pipe); i++) {
 		pipe = mixer->stage_pipe[i];
 		if (pipe) {
-			__dump_pipe(s, pipe);
+			__dump_pipe(s, pipe, mfd);
 			cnt++;
 		}
 	}
@@ -1219,8 +1229,8 @@
 	seq_printf(s, "Play Count=%u  Underrun Count=%u\n",
 			ctl->play_cnt, ctl->underrun_cnt);
 
-	__dump_mixer(s, ctl->mixer_left);
-	__dump_mixer(s, ctl->mixer_right);
+	__dump_mixer(s, ctl->mixer_left, ctl->mfd);
+	__dump_mixer(s, ctl->mixer_right, ctl->mfd);
 }
 
 static int __dump_mdp(struct seq_file *s, struct mdss_data_type *mdata)
@@ -1414,7 +1424,7 @@
 		seq_printf(s, "vsync: %08u \tunderrun: %08u\n",
 				ctl->vsync_cnt, ctl->underrun_cnt);
 		if (ctl->mfd) {
-			seq_printf(s, "user_bl: %08llu \tmod_bl: %08u\n",
+			seq_printf(s, "user_bl: %llu \tmod_bl: %08u\n",
 				ctl->mfd->bl_level, ctl->mfd->bl_level_scaled);
 		}
 	} else {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 54ceb19..5bff904 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -820,6 +820,8 @@
 #define MMSS_VBIF_WR_LIM_CONF			0x0C0
 #define MMSS_VBIF_OUT_RD_LIM_CONF0		0x0D0
 
+#define MMSS_VBIF_SRC_ERR		0x194
+#define MMSS_VBIF_ERR_INFO		0x1A0
 #define MMSS_VBIF_XIN_HALT_CTRL0	0x200
 #define MMSS_VBIF_XIN_HALT_CTRL1	0x204
 #define MMSS_VBIF_AXI_HALT_CTRL0	0x208
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index b49e954..947a3fe 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -52,8 +52,10 @@
 	u32 current_pp_num;
 	/*
 	 * aux_pp_num will be set only when topology is using split-lm.
-	 * aux_pp_num will be used only when MDSS_QUIRK_DSC_RIGHT_ONLY_PU
-	 * quirk is set and on following partial updates.
+	 * aux_pp_num will be used for a right-only update on
+	 * DUAL_LM_SINGLE_DISPLAY with 3D Mux, or when the
+	 * MDSS_QUIRK_DSC_RIGHT_ONLY_PU quirk is set and on the
+	 * following partial updates:
 	 *
 	 * right-only update on DUAL_LM_SINGLE_DISPLAY with DSC_MERGE
 	 * right-only update on DUAL_LM_DUAL_DISPLAY with DSC
@@ -76,6 +78,7 @@
 	struct mutex clk_mtx;
 	spinlock_t clk_lock;
 	spinlock_t koff_lock;
+	spinlock_t ctlstart_lock;
 	struct work_struct gate_clk_work;
 	struct delayed_work delayed_off_clk_work;
 	struct work_struct pp_done_work;
@@ -119,14 +122,32 @@
 static bool __mdss_mdp_cmd_is_aux_pp_needed(struct mdss_data_type *mdata,
 	struct mdss_mdp_ctl *mctl)
 {
-	return (mdata && mctl && mctl->is_master &&
-		mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU) &&
-		is_dsc_compression(&mctl->panel_data->panel_info) &&
-		((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
-		 ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
-		  (mctl->panel_data->panel_info.dsc_enc_total == 1))) &&
-		!mctl->mixer_left->valid_roi &&
-		mctl->mixer_right->valid_roi);
+	bool mux3d, merge, quirk, rightonly;
+
+	if (!mdata || !mctl || !mctl->is_master)
+		return false;
+
+	/*
+	 * aux_pp_num will be used for a right-only update on
+	 * DUAL_LM_SINGLE_DISPLAY with 3D Mux, or when the
+	 * MDSS_QUIRK_DSC_RIGHT_ONLY_PU quirk is set and on the
+	 * following partial updates:
+	 *
+	 * right-only update on DUAL_LM_SINGLE_DISPLAY with DSC_MERGE
+	 * right-only update on DUAL_LM_DUAL_DISPLAY with DSC
+	 */
+	mux3d = ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+			(mctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE));
+	merge = ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+			(mctl->panel_data->panel_info.dsc_enc_total == 1));
+	quirk = (mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU) &&
+			is_dsc_compression(&mctl->panel_data->panel_info) &&
+			((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+			merge));
+	rightonly = !mctl->mixer_left->valid_roi &&
+			mctl->mixer_right->valid_roi;
+
+	return ((mux3d || quirk) && rightonly);
 }
 
 static bool __mdss_mdp_cmd_is_panel_power_off(struct mdss_mdp_cmd_ctx *ctx)
@@ -147,15 +168,11 @@
 	u32 init;
 	u32 height;
 
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
-
 	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
 	if (!mixer) {
 		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
-		if (!mixer) {
-			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		if (!mixer)
 			goto exit;
-		}
 	}
 
 	init = mdss_mdp_pingpong_read(mixer->pingpong_base,
@@ -163,10 +180,8 @@
 	height = mdss_mdp_pingpong_read(mixer->pingpong_base,
 		MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
 
-	if (height < init) {
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	if (height < init)
 		goto exit;
-	}
 
 	cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
 		MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
@@ -176,13 +191,21 @@
 	else
 		cnt -= init;
 
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
-
 	pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
 exit:
 	return cnt;
 }
 
+static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl)
+{
+	u32 ret;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ret = mdss_mdp_cmd_line_count(ctl);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
 static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -335,6 +358,138 @@
 	return 0;
 }
 
+/**
+ * mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
+ * @arg: void pointer to the controller context.
+ *
+ * This function is the pp_done interrupt callback while disabling
+ * autorefresh. This function does not modify the kickoff count (koff_cnt).
+ */
+static void mdss_mdp_cmd_autorefresh_pp_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+	complete_all(&ctx->autorefresh_ppdone);
+
+	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
+		ctl->num, ctl->intf_num, ctx->current_pp_num,
+		atomic_read(&ctx->koff_cnt));
+}
+
+static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
+{
+	int rc;
+	u32 val, line_out, intr_type = MDSS_MDP_IRQ_TYPE_PING_PONG_COMP;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+	pr_debug("ctl:%d line_out:%d\n", ctl->num, line_out);
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	if (line_out < ctl->mixer_left->roi.h) {
+		reinit_completion(&ctx->autorefresh_ppdone);
+
+		/* enable ping pong done */
+		mdss_mdp_set_intr_callback(intr_type, ctx->current_pp_num,
+					mdss_mdp_cmd_autorefresh_pp_done, ctl);
+		mdss_mdp_irq_enable(intr_type, ctx->current_pp_num);
+
+		/* wait for ping pong done */
+		rc = wait_for_completion_timeout(&ctx->autorefresh_ppdone,
+				KOFF_TIMEOUT);
+		if (rc <= 0) {
+			val = mdss_mdp_pingpong_read(pp_base,
+				MDSS_MDP_REG_PP_LINE_COUNT);
+			if (val == ctl->mixer_left->roi.h) {
+				mdss_mdp_irq_clear(ctl->mdata,
+					MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+					ctx->current_pp_num);
+				mdss_mdp_irq_disable_nosync(intr_type,
+					ctx->current_pp_num);
+				mdss_mdp_set_intr_callback(intr_type,
+					ctx->current_pp_num, NULL, NULL);
+			} else {
+				pr_err("timedout waiting for ctl%d autorefresh pp done\n",
+					ctl->num);
+				MDSS_XLOG(0xbad3);
+				MDSS_XLOG_TOUT_HANDLER("mdp",
+					"vbif", "dbg_bus", "vbif_dbg_bus",
+					"panic");
+			}
+		}
+	}
+}
+
+static bool __disable_rd_ptr_from_te(char __iomem *pingpong_base)
+{
+	u32 cfg;
+	bool disabled;
+
+	cfg = mdss_mdp_pingpong_read(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+
+	disabled = BIT(20) & cfg;
+	cfg &= ~BIT(20);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+
+	return disabled;
+}
+
+static inline void __enable_rd_ptr_from_te(char __iomem *pingpong_base)
+{
+	u32 cfg;
+
+	cfg = mdss_mdp_pingpong_read(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+	cfg |= BIT(20);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+}
+
+/*
+ * __disable_autorefresh - disables autorefresh feature in the hw.
+ *
+ * To disable autorefresh, the driver needs to make sure no transactions are
+ * on-going; to ensure this, the driver must:
+ *
+ * 1. Disable listening to the external TE (this gives extra time before
+ *     the next transaction is triggered).
+ * 2. Wait for any on-going transaction (wait for the ping pong done interrupt).
+ * 3. Disable auto-refresh.
+ * 4. Re-enable listening to the external panel TE.
+ *
+ * So it is the responsibility of the caller of this function to only disable
+ * autorefresh when no hw transaction is on-going (wait for ping pong) and
+ * when listening for the external TE is disabled in the tear check logic
+ * (this is to prevent any race conditions with the hw), as mentioned in the
+ * above steps.
+ */
+static inline void __disable_autorefresh(char __iomem *pingpong_base)
+{
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
+}
+
 static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
 		bool locked)
 {
@@ -342,7 +497,7 @@
 	struct mdss_mdp_mixer *mixer = NULL, *mixer_right = NULL;
 	struct mdss_mdp_ctl *ctl = ctx->ctl;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
-	u32 offset = 0;
+	bool rd_ptr_disabled = false;
 
 	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
 	if (mixer) {
@@ -352,21 +507,35 @@
 		 */
 		if (mdss_mdp_pingpong_read(mixer->pingpong_base,
 			MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG) & BIT(31)) {
-			offset = MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG;
+
+			/* 1. disable rd pointer from the external te */
+			rd_ptr_disabled =
+				__disable_rd_ptr_from_te(mixer->pingpong_base);
+
+			/* 2. wait for previous transfer to finish */
+			mdss_mdp_cmd_wait4_autorefresh_pp(ctl);
+
+			/* 3. disable autorefresh */
 			if (is_pingpong_split(ctl->mfd))
-				writel_relaxed(0x0,
-					(mdata->slave_pingpong_base + offset));
+				__disable_autorefresh(
+					mdata->slave_pingpong_base);
+
 			if (is_split_lm(ctl->mfd)) {
-				mixer_right =
-					mdss_mdp_mixer_get(ctl,
-						MDSS_MDP_MIXER_MUX_RIGHT);
+				mixer_right = mdss_mdp_mixer_get(ctl,
+					MDSS_MDP_MIXER_MUX_RIGHT);
+
 				if (mixer_right)
-					writel_relaxed(0x0,
-					(mixer_right->pingpong_base + offset));
+					__disable_autorefresh(
+						mixer_right->pingpong_base);
 			}
-			mdss_mdp_pingpong_write(mixer->pingpong_base,
-				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
+
+			__disable_autorefresh(mixer->pingpong_base);
 			pr_debug("%s: disabling auto refresh\n", __func__);
+
+			/* 4. re-enable rd pointer from te (if it was enabled) */
+			if (rd_ptr_disabled)
+				__enable_rd_ptr_from_te(mixer->pingpong_base);
+
 		}
 		rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
 		if (rc)
@@ -527,6 +696,8 @@
 				pr_err("%s cannot find master ctl\n",
 					__func__);
 				WARN_ON(1);
+				rc = -EINVAL;
+				goto exit;
 			}
 			/*
 			 * We have both controllers but sctl has the Master,
@@ -999,6 +1170,7 @@
 	vsync_time = ktime_get();
 	ctl->vsync_cnt++;
 	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+	trace_mdp_cmd_readptr_done(ctl->num, atomic_read(&ctx->koff_cnt));
 	complete_all(&ctx->rdptr_done);
 
 	/* If caller is waiting for the read pointer, notify. */
@@ -1135,16 +1307,21 @@
 		return -EINVAL;
 
 	/*
-	 * Currently, only intf_fifo_underflow is
+	 * Currently, intf_fifo_overflow is not
 	 * supported for recovery sequence for command
 	 * mode DSI interface
 	 */
-	if (event != MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
+	if (event == MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
 		pr_warn("%s: unsupported recovery event:%d\n",
 					__func__, event);
 		return -EPERM;
 	}
 
+	if (event == MDP_INTF_DSI_PANEL_DEAD) {
+		mdss_fb_report_panel_dead(ctx->ctl->mfd);
+		return 0;
+	}
+
 	if (atomic_read(&ctx->koff_cnt)) {
 		mdss_mdp_ctl_reset(ctx->ctl, true);
 		reset_done = true;
@@ -1419,36 +1596,6 @@
 	return 0;
 }
 
-/**
- * mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
- * @arg: void pointer to the controller context.
- *
- * This function is the pp_done interrupt callback while disabling
- * autorefresh. This function does not modify the kickoff count (koff_cnt).
- */
-static void mdss_mdp_cmd_autorefresh_pp_done(void *arg)
-{
-	struct mdss_mdp_ctl *ctl = arg;
-	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
-
-	if (!ctx) {
-		pr_err("%s: invalid ctx\n", __func__);
-		return;
-	}
-
-	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
-		ctx->current_pp_num);
-	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
-		ctx->current_pp_num, NULL, NULL);
-
-	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
-	complete_all(&ctx->autorefresh_ppdone);
-
-	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
-		ctl->num, ctl->intf_num, ctx->current_pp_num,
-		atomic_read(&ctx->koff_cnt));
-}
-
 static void pingpong_done_work(struct work_struct *work)
 {
 	u32 status;
@@ -1851,22 +1998,34 @@
 	bool handoff)
 {
 	struct mdss_panel_data *pdata;
-	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
 	struct dsi_panel_clk_ctrl clk_ctrl;
 	int ret = 0;
 
+	/* Get both controllers in the correct order for dual displays */
+	mdss_mdp_get_split_display_ctls(&ctl, &sctl);
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* In pingpong split we have a single controller with dual contexts */
+	if (is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+
 	pdata = ctl->panel_data;
 
 	clk_ctrl.state = MDSS_DSI_CLK_OFF;
 	clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
-	if (sctl) {
+
+	if (sctx) { /* then slave */
 		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
 
-		if (is_pingpong_split(sctl->mfd))
+		if (sctx->pingpong_split_slave)
 			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
 
-		mdss_mdp_ctl_intf_event(sctl, MDSS_EVENT_PANEL_CLK_CTRL,
-			(void *)&clk_ctrl, flags);
+		mdss_mdp_ctl_intf_event(sctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl, flags);
 	}
 
 	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
@@ -2063,11 +2222,27 @@
 		return;
 
 	pinfo = &ctl->panel_data->panel_info;
-	if (pinfo->compression_mode != COMPRESSION_DSC)
-		return;
+	if (pinfo->compression_mode != COMPRESSION_DSC) {
+		/*
+		 * Check for a dynamic resolution switch from DSC On to
+		 * DSC Off and call mdss_mdp_ctl_dsc_setup to disable DSC
+		 */
+		if (ctl->pending_mode_switch == SWITCH_RESOLUTION) {
+			if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+				changed = true;
+			if (is_split_lm(ctl->mfd) &&
+			    ctl->mixer_right &&
+			    ctl->mixer_right->dsc_enabled)
+				changed = true;
+		} else {
+			return;
+		}
+	}
 
-	changed = ctl->mixer_left->roi_changed;
-	if (is_split_lm(ctl->mfd))
+	if (ctl->mixer_left)
+		changed |= ctl->mixer_left->roi_changed;
+	if (is_split_lm(ctl->mfd) &&
+	    ctl->mixer_right)
 		changed |= ctl->mixer_right->roi_changed;
 
 	if (changed)
@@ -2307,7 +2482,6 @@
 	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
 	char __iomem *pp_base;
 	u32 autorefresh_state;
-	u32 cfg;
 
 	if (!mctl->is_master)
 		return;
@@ -2327,11 +2501,8 @@
 		 * instruct MDP to ignore the panel TE so the next auto-refresh
 		 * is delayed until flush bits are set.
 		 */
-		cfg = mdss_mdp_pingpong_read(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
-		cfg &= ~BIT(20);
-		mdss_mdp_pingpong_write(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		__disable_rd_ptr_from_te(pp_base);
+
 		ctx->ignore_external_te = true;
 
 	}
@@ -2343,7 +2514,6 @@
 {
 	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
 	char __iomem *pp_base;
-	u32 cfg;
 
 	if (!mctl->is_master)
 		return;
@@ -2362,60 +2532,12 @@
 		pp_base = mctl->mixer_left->pingpong_base;
 
 		/* enable MDP to listen to the TE */
-		cfg = mdss_mdp_pingpong_read(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
-		cfg |= BIT(20);
-		mdss_mdp_pingpong_write(pp_base,
-			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		__enable_rd_ptr_from_te(pp_base);
+
 		ctx->ignore_external_te = false;
 	}
 }
 
-static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
-{
-	int rc;
-	u32 val, line_out, intr_type = MDSS_MDP_IRQ_TYPE_PING_PONG_COMP;
-	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
-	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
-
-	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
-
-	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
-
-	if ((line_out < ctl->mixer_left->roi.h) && line_out) {
-		reinit_completion(&ctx->autorefresh_ppdone);
-
-		/* enable ping pong done */
-		mdss_mdp_set_intr_callback(intr_type, ctx->current_pp_num,
-					mdss_mdp_cmd_autorefresh_pp_done, ctl);
-		mdss_mdp_irq_enable(intr_type, ctx->current_pp_num);
-
-		/* wait for ping pong done */
-		rc = wait_for_completion_timeout(&ctx->autorefresh_ppdone,
-				KOFF_TIMEOUT);
-		if (rc <= 0) {
-			val = mdss_mdp_pingpong_read(pp_base,
-				MDSS_MDP_REG_PP_LINE_COUNT);
-			if (val == ctl->mixer_left->roi.h) {
-				mdss_mdp_irq_clear(ctl->mdata,
-					MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
-					ctx->current_pp_num);
-				mdss_mdp_irq_disable_nosync(intr_type,
-					ctx->current_pp_num);
-				mdss_mdp_set_intr_callback(intr_type,
-					ctx->current_pp_num, NULL, NULL);
-			} else {
-				pr_err("timedout waiting for ctl%d autorefresh pp done\n",
-					ctl->num);
-				MDSS_XLOG(0xbad3);
-				MDSS_XLOG_TOUT_HANDLER("mdp",
-					"vbif", "dbg_bus", "vbif_dbg_bus",
-					"panic");
-			}
-		}
-	}
-}
-
 static void mdss_mdp_cmd_autorefresh_done(void *arg)
 {
 	struct mdss_mdp_ctl *ctl = arg;
@@ -2489,10 +2611,11 @@
 	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
 	unsigned long flags;
 	unsigned long autorefresh_timeout;
+	u32 timeout_us = 20000;
 
 	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
 
-	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h, 0x111);
 
 	reinit_completion(&ctx->autorefresh_done);
 
@@ -2551,6 +2674,26 @@
 			"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
 			"dbg_bus", "vbif_dbg_bus", "panic");
 	}
+
+	/* once autorefresh is done, wait for write pointer */
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+	MDSS_XLOG(ctl->num, line_out, 0x222);
+
+	/* wait until the first line is out to make sure the transfer is on-going */
+	rc = readl_poll_timeout(pp_base +
+		MDSS_MDP_REG_PP_LINE_COUNT, val,
+		(val & 0xffff) >= 1, 10, timeout_us);
+
+	if (rc) {
+		pr_err("timed out waiting for line out ctl:%d val:0x%x\n",
+			ctl->num, val);
+		MDSS_XLOG(0xbad5, val);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+			"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus", "panic");
+	}
+
+	MDSS_XLOG(val, 0x333);
 }
 
 /* caller needs to hold autorefresh_lock before calling this function */
@@ -2618,11 +2761,10 @@
 		mdss_mdp_cmd_wait4_autorefresh_pp(sctl);
 
 	/* disable autorefresh */
-	mdss_mdp_pingpong_write(pp_base, MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+	__disable_autorefresh(pp_base);
 
 	if (is_pingpong_split(ctl->mfd))
-		mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
-				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+		__disable_autorefresh(mdata->slave_pingpong_base);
 
 	ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
 	ctx->autorefresh_frame_cnt = 0;
@@ -2638,12 +2780,42 @@
 	return 0;
 }
 
+static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo)
+{
+	u32 line_count;
+	u32 sline_count = 0;
+	bool ret = true;
+	u32 low_threshold = pinfo->mdp_koff_thshold_low;
+	u32 high_threshold = pinfo->mdp_koff_thshold_high;
+
+	/* read the line count */
+	line_count = mdss_mdp_cmd_line_count(ctl);
+	if (sctl)
+		sline_count = mdss_mdp_cmd_line_count(sctl);
+
+	/* if the line count is within the range, return to trigger the transfer */
+	if (((line_count > low_threshold) && (line_count < high_threshold)) &&
+	     (!sctl || ((sline_count > low_threshold) &&
+			(sline_count < high_threshold))))
+		ret = false;
+
+	pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold);
+	pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret);
+	MDSS_XLOG(line_count, sline_count, ret);
+
+	return ret;
+}
 
 static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
-	struct mdss_mdp_cmd_ctx *ctx)
+	struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx)
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	bool is_pp_split = is_pingpong_split(ctl->mfd);
+	struct mdss_panel_info *pinfo = NULL;
+
+	if (ctl->panel_data)
+		pinfo = &ctl->panel_data->panel_info;
 
 	MDSS_XLOG(ctx->autorefresh_state);
 
@@ -2668,12 +2840,71 @@
 		ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
 
 	} else {
+
+		/*
+		 * Some panels can require that the mdp is within some range
+		 * of the scanlines in order to trigger the transfer.
+		 * If that is the case, make sure the panel scanline
+		 * is within the limit before starting.
+		 * Acquire a spinlock for this operation to raise the
+		 * priority of this thread and make sure the context
+		 * is maintained, so there is as little time as possible
+		 * between the scanline check and the kickoff.
+		 */
+		if (pinfo && pinfo->mdp_koff_thshold) {
+			spin_lock(&ctx->ctlstart_lock);
+			if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
+				spin_unlock(&ctx->ctlstart_lock);
+				usleep_range(pinfo->mdp_koff_delay,
+						pinfo->mdp_koff_delay + 10);
+				spin_lock(&ctx->ctlstart_lock);
+			}
+		}
+
 		/* SW Kickoff */
 		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
 		MDSS_XLOG(0x11, ctx->autorefresh_state);
+
+		if (pinfo && pinfo->mdp_koff_thshold)
+			spin_unlock(&ctx->ctlstart_lock);
 	}
 }
 
+static int mdss_mdp_cmd_wait4_vsync(struct mdss_mdp_ctl *ctl)
+{
+	int rc = 0;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("invalid context to wait for vsync\n");
+		return -ENODEV;
+	}
+
+	atomic_inc(&ctx->rdptr_cnt);
+
+	/* enable clks and rd_ptr interrupt */
+	mdss_mdp_setup_vsync(ctx, true);
+
+	/* wait for read pointer */
+	MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+	pr_debug("%s: wait for vsync cnt:%d\n",
+		__func__, atomic_read(&ctx->rdptr_cnt));
+
+	rc = mdss_mdp_cmd_wait4readptr(ctx);
+
+	/* wait for 1ms to make sure we are out of the trigger window */
+	usleep_range(1000, 1010);
+
+	/* disable rd_ptr interrupt */
+	mdss_mdp_setup_vsync(ctx, false);
+
+	MDSS_XLOG(ctl->num);
+	pr_debug("%s: out from wait for rd_ptr ctl:%d\n", __func__, ctl->num);
+
+	return rc;
+}
+
+
 /*
  * There are 3 partial update possibilities
  * left only ==> enable left pingpong_done
@@ -2802,7 +3033,7 @@
 	}
 
 	/* Kickoff */
-	__mdss_mdp_kickoff(ctl, ctx);
+	__mdss_mdp_kickoff(ctl, sctl, ctx);
 
 	mdss_mdp_cmd_post_programming(ctl);
 
@@ -3039,8 +3270,18 @@
 		 * mode.
 		 */
 		send_panel_events = true;
-		if (mdss_panel_is_power_on_ulp(panel_power_state))
+		if (mdss_panel_is_power_on_ulp(panel_power_state)) {
 			turn_off_clocks = true;
+		} else if (atomic_read(&ctx->koff_cnt)) {
+			/*
+			 * Transition from interactive to low power
+			 * Wait for kickoffs to finish
+			 */
+			MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+			mdss_mdp_cmd_wait4pingpong(ctl, NULL);
+			if (sctl)
+				mdss_mdp_cmd_wait4pingpong(sctl, NULL);
+		}
 	} else {
 		/* Transitions between low power and ultra low power */
 		if (mdss_panel_is_power_on_ulp(panel_power_state)) {
@@ -3069,6 +3310,8 @@
 				(void *)&ctx->intf_mdp_callback,
 				CTL_INTF_EVENT_FLAG_DEFAULT);
 
+			mdss_mdp_tearcheck_enable(ctl, true);
+
 			ctx->intf_stopped = 0;
 			if (sctx)
 				sctx->intf_stopped = 0;
@@ -3134,6 +3377,7 @@
 	ctl->ops.add_vsync_handler = NULL;
 	ctl->ops.remove_vsync_handler = NULL;
 	ctl->ops.reconfigure = NULL;
+	ctl->ops.wait_for_vsync_fnc = NULL;
 
 end:
 	if (!IS_ERR_VALUE((unsigned long)ret)) {
@@ -3236,6 +3480,7 @@
 	init_completion(&ctx->autorefresh_done);
 	spin_lock_init(&ctx->clk_lock);
 	spin_lock_init(&ctx->koff_lock);
+	spin_lock_init(&ctx->ctlstart_lock);
 	mutex_init(&ctx->clk_mtx);
 	mutex_init(&ctx->mdp_rdptr_lock);
 	mutex_init(&ctx->mdp_wrptr_lock);
@@ -3492,12 +3737,24 @@
 				}
 				ctl->switch_with_handoff = false;
 			}
+			/*
+			 * keep track of vsync, so it can be enabled as part
+			 * of the post switch sequence
+			 */
+			if (ctl->vsync_handler.enabled)
+				ctl->need_vsync_on = true;
 
 			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
 			mdss_mdp_ctl_intf_event(ctl,
 				MDSS_EVENT_DSI_DYNAMIC_SWITCH,
 				(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
 		} else {
+			if (ctl->need_vsync_on &&
+					ctl->ops.add_vsync_handler) {
+				ctl->ops.add_vsync_handler(ctl,
+						&ctl->vsync_handler);
+				ctl->need_vsync_on = false;
+			}
 			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
 		}
 	}
@@ -3524,12 +3781,13 @@
 	ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
 	ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
 	ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
-	ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+	ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper;
 	ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
 	ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
 	ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
 	ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming;
 	ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr;
+	ctl->ops.wait_for_vsync_fnc = mdss_mdp_cmd_wait4_vsync;
 	pr_debug("%s:-\n", __func__);
 
 	return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index f18987c..dba3b0d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -327,11 +327,11 @@
 	}
 
 	/*
-	 * Currently, only intf_fifo_overflow is
+	 * Currently, intf_fifo_underflow is not
 	 * supported for recovery sequence for video
 	 * mode DSI interface
 	 */
-	if (event != MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
+	if (event == MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
 		pr_warn("%s: unsupported recovery event:%d\n",
 					__func__, event);
 		return -EPERM;
@@ -341,6 +341,11 @@
 	pr_debug("%s: ctl num = %d, event = %d\n",
 				__func__, ctl->num, event);
 
+	if (event == MDP_INTF_DSI_PANEL_DEAD) {
+		mdss_fb_report_panel_dead(ctx->ctl->mfd);
+		return 0;
+	}
+
 	pinfo = &ctl->panel_data->panel_info;
 	clk_rate = ((ctl->intf_type == MDSS_INTF_DSI) ?
 			pinfo->mipi.dsi_pclk_rate :
@@ -539,8 +544,10 @@
 			ctl_flush |= (BIT(31) >>
 					(sctx->intf_num - MDSS_MDP_INTF0));
 	}
+	MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush,
+				mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH));
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush);
-	MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush);
+
 }
 
 static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl, bool clear)
@@ -924,6 +931,13 @@
 {
 	int intfs_num, ret = 0;
 
+	if (ctl->cdm) {
+		if (!mdss_mdp_cdm_destroy(ctl->cdm))
+			mdss_mdp_ctl_write(ctl,
+				MDSS_MDP_REG_CTL_FLUSH, BIT(26));
+		ctl->cdm = NULL;
+	}
+
 	intfs_num = ctl->intf_num - MDSS_MDP_INTF0;
 	ret = mdss_mdp_video_intfs_stop(ctl, ctl->panel_data, intfs_num);
 	if (IS_ERR_VALUE((unsigned long)ret)) {
@@ -936,10 +950,6 @@
 	mdss_mdp_ctl_reset(ctl, false);
 	ctl->intf_ctx[MASTER_CTX] = NULL;
 
-	if (ctl->cdm) {
-		mdss_mdp_cdm_destroy(ctl->cdm);
-		ctl->cdm = NULL;
-	}
 	return 0;
 }
 
@@ -1551,13 +1561,43 @@
 	return 0;
 }
 
+static int mdss_mdp_video_splash_handoff(struct mdss_mdp_ctl *ctl)
+{
+	int i, ret = 0;
+	u32 data, flush;
+
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+	if (ret) {
+		pr_err("%s:ctl%d failed to handle 'CONT_SPLASH_BEGIN' event\n"
+			, __func__, ctl->num);
+		return ret;
+	}
+
+	/* clear up mixer0 and mixer1 */
+	flush = 0;
+	for (i = 0; i < 2; i++) {
+		data = mdss_mdp_ctl_read(ctl,
+			MDSS_MDP_REG_CTL_LAYER(i));
+		if (data) {
+			mdss_mdp_ctl_write(ctl,
+				MDSS_MDP_REG_CTL_LAYER(i),
+				MDSS_MDP_LM_BORDER_COLOR);
+			flush |= (0x40 << i);
+		}
+	}
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush);
+
+	return ret;
+}
+
 int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
 	bool handoff)
 {
 	struct mdss_panel_data *pdata;
-	int i, ret = 0, off;
-	u32 data, flush;
-	struct mdss_mdp_video_ctx *ctx;
+	int ret = 0, off;
+	struct mdss_mdp_video_ctx *ctx, *sctx = NULL;
 	struct mdss_mdp_ctl *sctl;
 
 	if (!ctl) {
@@ -1581,41 +1621,43 @@
 	pdata->panel_info.cont_splash_enabled = 0;
 	sctl = mdss_mdp_get_split_ctl(ctl);
 
-	if (sctl)
+	if (sctl) {
 		sctl->panel_data->panel_info.cont_splash_enabled = 0;
-	else if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_video_ctx *) sctl->intf_ctx[MASTER_CTX];
+	} else if (ctl->panel_data->next && is_pingpong_split(ctl->mfd)) {
 		ctl->panel_data->next->panel_info.cont_splash_enabled = 0;
+		sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+	}
 
 	if (!handoff) {
-		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN,
-				      NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
-		if (ret) {
-			pr_err("%s: Failed to handle 'CONT_SPLASH_BEGIN' event\n"
-				, __func__);
-			return ret;
-		}
+		ret = mdss_mdp_video_splash_handoff(ctl);
 
-		/* clear up mixer0 and mixer1 */
-		flush = 0;
-		for (i = 0; i < 2; i++) {
-			data = mdss_mdp_ctl_read(ctl,
-				MDSS_MDP_REG_CTL_LAYER(i));
-			if (data) {
-				mdss_mdp_ctl_write(ctl,
-					MDSS_MDP_REG_CTL_LAYER(i),
-					MDSS_MDP_LM_BORDER_COLOR);
-				flush |= (0x40 << i);
-			}
-		}
-		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush);
+		if (!ret && sctl)
+			ret = mdss_mdp_video_splash_handoff(sctl);
+
+		if (ret)
+			return ret;
 
 		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+
+		if (sctx)
+			mdp_video_write(sctx,
+				MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+
+		mdss_mdp_video_timegen_flush(ctl, sctx);
+
 		/* wait for 1 VSYNC for the pipe to be unstaged */
 		msleep(20);
 
 		ret = mdss_mdp_ctl_intf_event(ctl,
 			MDSS_EVENT_CONT_SPLASH_FINISH, NULL,
 			CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		if (!ret && sctl)
+			ret = mdss_mdp_ctl_intf_event(sctl,
+				MDSS_EVENT_CONT_SPLASH_FINISH, NULL,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+
 	}
 
 	return ret;
@@ -1668,7 +1710,7 @@
 static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
 		struct mdss_mdp_ctl *ctl)
 {
-	int fetch_start, fetch_enable, v_total, h_total;
+	int fetch_start = 0, fetch_enable, v_total, h_total;
 	struct mdss_data_type *mdata;
 	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
 
@@ -1677,6 +1719,15 @@
 	pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo);
 	if (!pinfo->prg_fet) {
 		pr_debug("programmable fetch is not needed/supported\n");
+
+		fetch_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+		fetch_enable &= ~BIT(31);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, fetch_enable);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_PROG_FETCH_START,
+						fetch_start);
+
+		MDSS_XLOG(ctx->intf_num, fetch_enable, fetch_start);
 		return;
 	}
 
@@ -2181,6 +2232,7 @@
 	ctl->ops.config_fps_fnc = mdss_mdp_video_config_fps;
 	ctl->ops.early_wake_up_fnc = mdss_mdp_video_early_wake_up;
 	ctl->ops.update_lineptr = mdss_mdp_video_lineptr_ctrl;
+	ctl->ops.wait_for_vsync_fnc = NULL;
 
 	return 0;
 }
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 46f25dd..3d63890 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -518,7 +518,9 @@
 	}
 
 	if (ctl->cdm) {
-		mdss_mdp_cdm_destroy(ctl->cdm);
+		if (!mdss_mdp_cdm_destroy(ctl->cdm))
+			mdss_mdp_ctl_write(ctl,
+				MDSS_MDP_REG_CTL_FLUSH, BIT(26));
 		ctl->cdm = NULL;
 	}
 	return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index d898e7e..a7692f0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -103,14 +103,15 @@
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 
 	if ((layer->z_order != HW_CURSOR_STAGE(mdata))
+			|| layer->flags & MDP_LAYER_FLIP_LR
 			|| layer->src_rect.w > mdata->max_cursor_size
 			|| layer->src_rect.h > mdata->max_cursor_size
 			|| layer->src_rect.w != layer->dst_rect.w
 			|| layer->src_rect.h != layer->dst_rect.h
 			|| !mdata->ncursor_pipes) {
-		pr_err("Incorrect cursor configs for pipe:%d, cursor_pipes:%d, z_order:%d\n",
+		pr_err("Incorrect cursor configs for pipe:0x%x, ncursor_pipes:%d, z_order:%d, flags:0x%x\n",
 				layer->pipe_ndx, mdata->ncursor_pipes,
-				layer->z_order);
+				layer->z_order, layer->flags);
 		pr_err("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
 				layer->src_rect.x, layer->src_rect.y,
 				layer->src_rect.w, layer->src_rect.h,
@@ -344,12 +345,18 @@
 	 */
 	if (pipe->csc_coeff_set != layer->color_space) {
 		src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
-		if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
-			status = -EPERM;
-			pr_err("csc change is not permitted on used pipe\n");
+		if (!src_fmt) {
+			pr_err("Invalid layer format %d\n",
+						layer->buffer.format);
+			status = -EINVAL;
+		} else {
+			if (pipe->src_fmt->is_yuv && src_fmt &&
+							src_fmt->is_yuv) {
+				status = -EPERM;
+				pr_err("csc change is not permitted on used pipe\n");
+			}
 		}
 	}
-
 	return status;
 }
 
@@ -484,9 +491,6 @@
 
 	pipe->comp_ratio = layer->buffer.comp_ratio;
 
-	if (mfd->panel_orientation)
-		layer->flags ^= mfd->panel_orientation;
-
 	pipe->mixer_left = mixer;
 	pipe->mfd = mfd;
 	pipe->play_cnt = 0;
@@ -561,6 +565,15 @@
 		goto end;
 	}
 
+	/* scaling is not allowed for solid_fill layers */
+	if ((pipe->flags & MDP_SOLID_FILL) &&
+		((pipe->src.w != pipe->dst.w) ||
+			(pipe->src.h != pipe->dst.h))) {
+		pr_err("solid fill pipe:%d cannot have scaling\n", pipe->num);
+		ret = -EINVAL;
+		goto end;
+	}
+
 	/*
 	 * unstage the pipe if it's current z_order does not match with new
 	 * z_order because client may only call the validate.
@@ -625,13 +638,6 @@
 	pipe->multirect.mode = vinfo->multirect.mode;
 	pipe->mixer_stage = layer->z_order;
 
-	if (mfd->panel_orientation & MDP_FLIP_LR)
-		pipe->dst.x = pipe->mixer_left->width - pipe->dst.x -
-			pipe->dst.w;
-	if (mfd->panel_orientation & MDP_FLIP_UD)
-		pipe->dst.y = pipe->mixer_left->height - pipe->dst.y -
-			pipe->dst.h;
-
 	memcpy(&pipe->layer, layer, sizeof(struct mdp_input_layer));
 
 	mdss_mdp_overlay_set_chroma_sample(pipe);
@@ -1175,6 +1181,11 @@
 	pr_debug("pipe count:: secure display:%d non-secure:%d\n",
 		sd_pipes, nonsd_pipes);
 
+	if (mdss_get_sd_client_cnt() && !mdp5_data->sd_enabled) {
+		pr_err("Secure session already enabled for other client\n");
+		return -EINVAL;
+	}
+
 	mdp5_data->sd_transition_state = SD_TRANSITION_NONE;
 	if (!__is_sd_state_valid(sd_pipes, nonsd_pipes, panel_type,
 		mdp5_data->sd_enabled)) {
@@ -1188,7 +1199,14 @@
 	} else if (mdp5_data->sd_enabled && !sd_pipes) {
 		mdp5_data->sd_transition_state =
 			SD_TRANSITION_SECURE_TO_NON_SECURE;
+	} else if (mdp5_data->ctl->is_video_mode &&
+		((sd_pipes && !mdp5_data->sd_enabled) ||
+		(!sd_pipes && mdp5_data->sd_enabled)) &&
+		!mdp5_data->cache_null_commit) {
+		pr_err("NULL commit missing before display secure session entry/exit\n");
+		ret = -EINVAL;
 	}
+
 	return ret;
 }
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 672b634..af6e4ce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -2904,6 +2904,7 @@
 	if (ctl->ops.wait_pingpong && mdp5_data->mdata->serialize_wait4pp)
 		mdss_mdp_display_wait4pingpong(ctl, true);
 
+	mdp5_data->cache_null_commit = list_empty(&mdp5_data->pipes_used);
 	sd_transition_state = mdp5_data->sd_transition_state;
 	if (sd_transition_state != SD_TRANSITION_NONE) {
 		ret = __config_secure_display(mdp5_data);
@@ -2936,6 +2937,7 @@
 	ATRACE_BEGIN("sspp_programming");
 	ret = __overlay_queue_pipes(mfd);
 	ATRACE_END("sspp_programming");
+
 	mutex_unlock(&mdp5_data->list_lock);
 
 	mdp5_data->kickoff_released = false;
@@ -3864,7 +3866,8 @@
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct dynamic_fps_data data = {0};
 
-	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl) ||
+			mdss_panel_is_power_off(mfd->panel_power_state)) {
 		pr_debug("panel is off\n");
 		return count;
 	}
@@ -6649,10 +6652,6 @@
 
 	mfd->panel_orientation = mfd->panel_info->panel_orientation;
 
-	if ((mfd->panel_info->panel_orientation & MDP_FLIP_LR) &&
-	    (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY))
-		mdp5_data->mixer_swap = true;
-
 	rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
 	if (rc) {
 		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 3a8df20..4231af4 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -934,17 +934,18 @@
 		data = readl_relaxed(mdata->mdp_base +
 			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
 		client_id = (data >> s) & 0xFF;
-		if (test_bit(i, mdata->mmb_alloc_map)) {
-			/*
-			 * Certain pipes may have a dedicated set of
-			 * SMP MMBs statically allocated to them. In
-			 * such cases, we do not need to do anything
-			 * here.
-			 */
-			pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
-				, i, pipe ? pipe->num : -1, client_id);
-			continue;
-		}
+		if (i < ARRAY_SIZE(mdata->mmb_alloc_map))
+			if (test_bit(i, mdata->mmb_alloc_map)) {
+				/*
+				 * Certain pipes may have a dedicated set of
+				 * SMP MMBs statically allocated to them. In
+				 * such cases, we do not need to do anything
+				 * here.
+				 */
+				pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
+					, i, pipe ? pipe->num : -1, client_id);
+				continue;
+			}
 
 		if (client_id) {
 			if (client_id != prev_id) {
@@ -1011,8 +1012,10 @@
 	u32 mask, reg_val, reg_val_lvl, i, vbif_qos;
 	u32 reg_high;
 	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+	u32 *vbif_qos_ptr = is_realtime ? mdata->vbif_rt_qos :
+		mdata->vbif_nrt_qos;
 
-	if (mdata->npriority_lvl == 0)
+	if ((mdata->npriority_lvl == 0) || !vbif_qos_ptr)
 		return;
 
 	if (test_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map)) {
@@ -1028,8 +1031,7 @@
 				is_nrt_vbif);
 
 			mask = 0x3 << (pipe->xin_id * 4);
-			vbif_qos = is_realtime ?
-				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+			vbif_qos = vbif_qos_ptr[i];
 
 			reg_val &= ~(mask);
 			reg_val |= vbif_qos << (pipe->xin_id * 4);
@@ -1053,8 +1055,7 @@
 
 			mask = 0x3 << (pipe->xin_id * 2);
 			reg_val &= ~(mask);
-			vbif_qos = is_realtime ?
-				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+			vbif_qos = vbif_qos_ptr[i];
 			reg_val |= vbif_qos << (pipe->xin_id * 2);
 			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_REMAP_BASE + i*4,
 				reg_val, is_nrt_vbif);
@@ -2072,8 +2073,9 @@
 	u32 cdp_settings = 0x0;
 	bool is_rotator = (pipe->mixer_left && pipe->mixer_left->rotator_mode);
 
-	/* Disable CDP for rotator pipe in v1 */
-	if (is_rotator && mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP))
+	/* Disable CDP for rotator pipe or if not requested for the target */
+	if (!mdata->enable_cdp || (is_rotator &&
+			mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP)))
 		goto exit;
 
 	cdp_settings = MDSS_MDP_CDP_ENABLE;
@@ -2290,6 +2292,9 @@
 		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[2]);
 	}
 
+	MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num,
+		pipe->play_cnt, addr[0], addr[1], addr[2], addr[3]);
+
 	return 0;
 }
 
@@ -2311,7 +2316,7 @@
 
 	/* support ARGB color format only */
 	unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
-		(C1_B_Cb << 8) | (C0_G_Y << 0);
+		(C0_G_Y << 8) | (C1_B_Cb << 0);
 	if (pipe->scaler.enable)
 		opmode |= (1 << 31);
 
@@ -2677,9 +2682,6 @@
 		goto update_nobuf;
 	}
 
-	MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num,
-						pipe->play_cnt, 0x222);
-
 	if (params_changed) {
 		pipe->params_changed = 0;
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 13afa46..8f4d437 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2445,7 +2445,7 @@
 	}
 
 	if (flags & PP_FLAGS_DIRTY_DITHER) {
-		if (!pp_ops[DITHER].pp_set_config) {
+		if (!pp_ops[DITHER].pp_set_config && addr) {
 			pp_dither_config(addr, pp_sts,
 				&mdss_pp_res->dither_disp_cfg[disp_num]);
 		} else {
@@ -5156,7 +5156,7 @@
 				u32 block)
 {
 	int ret = 0;
-	u32 sum;
+	int sum = 0;
 	char __iomem *v_base = NULL;
 	unsigned long flag;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -5186,7 +5186,8 @@
 		else if (block == SSPP_VIG)
 			v_base = ctl_base +
 				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
-		sum = pp_hist_read(v_base, hist_info);
+		if (v_base)
+			sum = pp_hist_read(v_base, hist_info);
 	}
 	writel_relaxed(0, hist_info->base);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
index 44817c3..cf19501 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
@@ -149,7 +149,7 @@
 {
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
-	int rc, ret;
+	int ret;
 
 	/*
 	 * iommu dynamic attach for following conditions.
@@ -166,26 +166,41 @@
 		return -EPERM;
 	}
 
-	rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+	/*
+	 * Set handoff_pending to false to ensure the smmu attach happens
+	 * with the early map attribute
+	 */
+	mdata->handoff_pending = false;
+
+	ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 1);
+	if (ret) {
+		pr_debug("mdss set attribute failed for early map\n");
+		goto end;
+	}
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("mdss iommu attach failed\n");
+		goto end;
+	}
+
+	ret = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
 				mdp5_data->splash_mem_addr,
 				mdp5_data->splash_mem_addr,
 				mdp5_data->splash_mem_size,
 				IOMMU_READ | IOMMU_NOEXEC);
-	if (rc) {
-		pr_debug("iommu memory mapping failed rc=%d\n", rc);
+	if (ret) {
+		pr_err("iommu memory mapping failed ret=%d\n", ret);
 	} else {
-		ret = mdss_iommu_ctrl(1);
-		if (IS_ERR_VALUE((unsigned long)ret)) {
-			pr_err("mdss iommu attach failed\n");
-			mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
-					mdp5_data->splash_mem_addr,
-					mdp5_data->splash_mem_size);
-		} else {
-			mfd->splash_info.iommu_dynamic_attached = true;
-		}
+		pr_debug("iommu map passed for PA=VA\n");
+		mfd->splash_info.iommu_dynamic_attached = true;
 	}
 
-	return rc;
+	ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 0);
+end:
+	mdata->handoff_pending = true;
+
+	return ret;
 }
 
 static void mdss_mdp_splash_unmap_splash_mem(struct msm_fb_data_type *mfd)
@@ -193,12 +208,10 @@
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 
 	if (mfd->splash_info.iommu_dynamic_attached) {
-
 		mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
 				mdp5_data->splash_mem_addr,
 				mdp5_data->splash_mem_size);
 		mdss_iommu_ctrl(0);
-
 		mfd->splash_info.iommu_dynamic_attached = false;
 	}
 }
diff --git a/drivers/video/fbdev/msm/mdss_mdp_trace.h b/drivers/video/fbdev/msm/mdss_mdp_trace.h
index c100e9c..35a126b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_trace.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_trace.h
@@ -295,6 +295,42 @@
 			__entry->ib_quota)
 );
 
+TRACE_EVENT(mdp_compare_bw,
+	TP_PROTO(unsigned long long new_ab, unsigned long long new_ib,
+		unsigned long long new_wb,
+		unsigned long long old_ab, unsigned long long old_ib,
+		unsigned long long old_wb,
+		u32 params_changed, bool update_bw),
+	TP_ARGS(new_ab, new_ib, new_wb,
+					old_ab, old_ib, old_wb,
+					params_changed, update_bw),
+	TP_STRUCT__entry(
+			__field(u64, new_ab)
+			__field(u64, new_ib)
+			__field(u64, new_wb)
+			__field(u64, old_ab)
+			__field(u64, old_ib)
+			__field(u64, old_wb)
+			__field(u32, params_changed)
+			__field(bool, update_bw)
+	),
+	TP_fast_assign(
+			__entry->new_ab = new_ab;
+			__entry->new_ib = new_ib;
+			__entry->new_wb = new_wb;
+			__entry->old_ab = old_ab;
+			__entry->old_ib = old_ib;
+			__entry->old_wb = old_wb;
+			__entry->params_changed = params_changed;
+			__entry->update_bw = update_bw;
+	),
+	TP_printk("[ab,ib,wb]new[%llu,%llu,%llu]old[%llu,%llu,%llu]chgd:%d %d",
+		__entry->new_ab, __entry->new_ib, __entry->new_wb,
+		__entry->old_ab, __entry->old_ib,
+		__entry->old_wb, __entry->params_changed,
+		__entry->update_bw)
+);
+
 TRACE_EVENT(mdp_misr_crc,
 	TP_PROTO(u32 block_id, u32 vsync_cnt, u32 crc),
 	TP_ARGS(block_id, vsync_cnt, crc),
@@ -332,6 +368,22 @@
 			__entry->koff_cnt)
 );
 
+TRACE_EVENT(mdp_cmd_readptr_done,
+	TP_PROTO(u32 ctl_num, int koff_cnt),
+	TP_ARGS(ctl_num, koff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(int, koff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->koff_cnt = koff_cnt;
+	),
+	TP_printk("ctl num:%d kickoff:%d",
+			__entry->ctl_num,
+			__entry->koff_cnt)
+);
+
 TRACE_EVENT(mdp_cmd_release_bw,
 	TP_PROTO(u32 ctl_num),
 	TP_ARGS(ctl_num),
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index c030109..dd73d2c 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -969,7 +969,9 @@
 		 * be filled due to map call which will be unmapped above.
 		 *
 		 */
-		pr_debug("skip memory unmapping for secure display content\n");
+		if (data->ihandle)
+			ion_free(iclient, data->ihandle);
+		pr_debug("free memory handle for secure display/camera content\n");
 	} else {
 		return -ENOMEM;
 	}
@@ -1048,19 +1050,18 @@
 			ret = 0;
 			goto done;
 		} else {
-			struct ion_handle *ihandle = NULL;
 			struct sg_table *sg_ptr = NULL;
 
+			data->ihandle = ion_import_dma_buf_fd(iclient,
+					img->memory_id);
+			if (IS_ERR_OR_NULL(data->ihandle)) {
+				ret = -EINVAL;
+				pr_err("ion import buffer failed\n");
+				data->ihandle = NULL;
+				goto done;
+			}
 			do {
-				ihandle = ion_import_dma_buf_fd(iclient,
-							     img->memory_id);
-				if (IS_ERR_OR_NULL(ihandle)) {
-					ret = -EINVAL;
-					pr_err("ion import buffer failed\n");
-					break;
-				}
-
-				sg_ptr = ion_sg_table(iclient, ihandle);
+				sg_ptr = ion_sg_table(iclient, data->ihandle);
 				if (sg_ptr == NULL) {
 					pr_err("ion sg table get failed\n");
 					ret = -EINVAL;
@@ -1086,8 +1087,6 @@
 				ret = 0;
 			} while (0);
 
-			if (!IS_ERR_OR_NULL(ihandle))
-				ion_free(iclient, ihandle);
 			return ret;
 		}
 	}
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index c9e7e61..c66d5ab 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -60,6 +60,7 @@
 #define WRITEBACK_PANEL		10	/* Wifi display */
 #define LVDS_PANEL		11	/* LVDS */
 #define EDP_PANEL		12	/* LVDS */
+#define SPI_PANEL		13
 
 #define DSC_PPS_LEN		128
 
@@ -108,6 +109,7 @@
 	MDSS_PANEL_INTF_DSI,
 	MDSS_PANEL_INTF_EDP,
 	MDSS_PANEL_INTF_HDMI,
+	MDSS_PANEL_INTF_SPI,
 };
 
 enum {
@@ -118,6 +120,12 @@
 };
 
 enum {
+	MDSS_PANEL_BLANK_BLANK = 0,
+	MDSS_PANEL_BLANK_UNBLANK,
+	MDSS_PANEL_BLANK_LOW_POWER,
+};
+
+enum {
 	MDSS_PANEL_LOW_PERSIST_MODE_OFF = 0,
 	MDSS_PANEL_LOW_PERSIST_MODE_ON,
 };
@@ -167,6 +175,7 @@
 
 #define MDP_INTF_DSI_CMD_FIFO_UNDERFLOW		0x0001
 #define MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW	0x0002
+#define MDP_INTF_DSI_PANEL_DEAD			0x0003
 
 
 enum {
@@ -429,6 +438,10 @@
 	char frame_rate;	/* fps */
 };
 
+struct spi_panel_info {
+	char frame_rate;
+};
+
 /**
  * struct dynamic_fps_data - defines dynamic fps related data
  * @hfp: horizontal front porch
@@ -659,6 +672,10 @@
 	u32 saved_fporch;
 	/* current fps, once is programmed in hw */
 	int current_fps;
+	u32 mdp_koff_thshold_low;
+	u32 mdp_koff_thshold_high;
+	bool mdp_koff_thshold;
+	u32 mdp_koff_delay;
 
 	int panel_max_fps;
 	int panel_max_vtotal;
@@ -676,6 +693,7 @@
 	u32 partial_update_roi_merge;
 	struct ion_handle *splash_ihdl;
 	int panel_power_state;
+	int blank_state;
 	int compression_mode;
 
 	uint32_t panel_dead;
@@ -735,6 +753,7 @@
 	struct lcd_panel_info lcdc;
 	struct fbc_panel_info fbc;
 	struct mipi_panel_info mipi;
+	struct spi_panel_info spi;
 	struct lvds_panel_info lvds;
 	struct edp_panel_info edp;
 
@@ -874,6 +893,9 @@
 			frame_rate = panel_info->lcdc.frame_rate;
 			break;
 		}
+	case SPI_PANEL:
+		frame_rate = panel_info->spi.frame_rate;
+		break;
 	default:
 		pixel_total = (panel_info->lcdc.h_back_porch +
 			  panel_info->lcdc.h_front_porch +
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index 972c8de..c213f70 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -180,6 +180,27 @@
 	return rc;
 }
 
+int mdss_smmu_set_attribute(int domain, int flag, int val)
+{
+	int rc = 0, domain_attr = 0;
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+
+	if (flag == EARLY_MAP)
+		domain_attr = DOMAIN_ATTR_EARLY_MAP;
+	else
+		goto end;
+
+	rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
+			domain_attr, &val);
+end:
+	return rc;
+}
+
 /*
  * mdss_smmu_v2_attach()
  *
@@ -474,24 +495,30 @@
 		(struct mdss_smmu_client *)user_data;
 	u32 fsynr1, mid, i;
 
-	if (!mdss_smmu || !mdss_smmu->mmu_base)
+	if (!mdss_smmu)
 		goto end;
 
-	fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
-	mid = fsynr1 & 0xff;
-	pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
-		iova, flags, fsynr1, mid);
+	if (mdss_smmu->mmu_base) {
+		fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
+		mid = fsynr1 & 0xff;
+		pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
+			iova, flags, fsynr1, mid);
 
-	/* get domain id information */
-	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
-		if (mdss_smmu == mdss_smmu_get_cb(i))
-			break;
+		/* get domain id information */
+		for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+			if (mdss_smmu == mdss_smmu_get_cb(i))
+				break;
+		}
+
+		if (i == MDSS_IOMMU_MAX_DOMAIN)
+			goto end;
+
+		mdss_mdp_debug_mid(mid);
+	} else {
+		pr_err("mdss_smmu: iova:0x%lx flags:0x%x\n",
+			iova, flags);
+		MDSS_XLOG_TOUT_HANDLER("mdp");
 	}
-
-	if (i == MDSS_IOMMU_MAX_DOMAIN)
-		goto end;
-
-	mdss_mdp_debug_mid(mid);
 end:
 	return -ENODEV;
 }
@@ -693,13 +720,13 @@
 }
 
 static struct mdss_smmu_domain mdss_mdp_unsec = {
-	"mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128K, (SZ_4G - SZ_128M)};
+	"mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128M, (SZ_4G - SZ_128M)};
 static struct mdss_smmu_domain mdss_rot_unsec = {
-	NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_4G - SZ_128M)};
+	NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128M, (SZ_4G - SZ_128M)};
 static struct mdss_smmu_domain mdss_mdp_sec = {
-	"mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128K, (SZ_4G - SZ_128M)};
+	"mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128M, (SZ_4G - SZ_128M)};
 static struct mdss_smmu_domain mdss_rot_sec = {
-	NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128K, (SZ_4G - SZ_128M)};
+	NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128M, (SZ_4G - SZ_128M)};
 
 static const struct of_device_id mdss_smmu_dt_match[] = {
 	{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
@@ -840,14 +867,13 @@
 
 	mdss_smmu->dev = dev;
 
+	iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
+			mdss_smmu_fault_handler, mdss_smmu);
 	address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0);
 	if (address) {
 		size = address + 1;
 		mdss_smmu->mmu_base = ioremap(be32_to_cpu(*address),
 			be32_to_cpu(*size));
-		if (mdss_smmu->mmu_base)
-			iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
-				mdss_smmu_fault_handler, mdss_smmu);
 	} else {
 		pr_debug("unable to map context bank base\n");
 	}
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
index 091af3b..97f0933 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.h
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -42,6 +42,11 @@
 
 void mdss_smmu_register(struct device *dev);
 int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
+int mdss_smmu_set_attribute(int domain, int flag, int val);
+
+enum smmu_attributes {
+	EARLY_MAP
+};
 
 static inline int mdss_smmu_dma_data_direction(int dir)
 {
diff --git a/drivers/video/fbdev/msm/mdss_spi_client.c b/drivers/video/fbdev/msm/mdss_spi_client.c
new file mode 100644
index 0000000..541e382
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_client.c
@@ -0,0 +1,198 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+
+#include "mdss_spi_client.h"
+
+#define MAX_READ_SPEED_HZ	9600000
+#define SPI_PANEL_COMMAND_LEN	1
+static struct spi_device *mdss_spi_client;
+
+int mdss_spi_read_data(u8 reg_addr, u8 *data, u8 len)
+{
+	int rc = 0;
+	u32 max_speed_hz;
+	u8 memory_write_reg = 0x2c;
+	u8 empty_pack[] = {0x29, 0x29, 0x29};
+	struct spi_transfer t[4] = {
+		[0] = {
+			.tx_buf = &reg_addr,
+			.len = 1,
+		},
+		[1] = {
+			.rx_buf = data,
+			.len = len,
+		},
+		[2] = {
+			.tx_buf = &empty_pack,
+			.len = 3,
+		},
+		[3] = {
+			.tx_buf = &memory_write_reg,
+			.len = 1,
+		}
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 8;
+	max_speed_hz = mdss_spi_client->max_speed_hz;
+	mdss_spi_client->max_speed_hz = MAX_READ_SPEED_HZ;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+	spi_message_add_tail(&t[1], &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[2], &m);
+	rc = spi_sync(mdss_spi_client, &m);
+	spi_message_init(&m);
+	spi_message_add_tail(&t[3], &m);
+	rc = spi_sync(mdss_spi_client, &m);
+	mdss_spi_client->max_speed_hz = max_speed_hz;
+
+	return rc;
+}
+
+int mdss_spi_tx_command(const void *buf)
+{
+	int rc = 0;
+	struct spi_transfer t = {
+		.tx_buf = buf,
+		.len    = SPI_PANEL_COMMAND_LEN,
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 8;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	return rc;
+}
+
+int mdss_spi_tx_parameter(const void *buf, size_t len)
+{
+	int rc = 0;
+	struct spi_transfer t = {
+		.tx_buf = buf,
+		.len    = len,
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_spi_client->bits_per_word = 8;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	return rc;
+}
+
+int mdss_spi_tx_pixel(const void *buf, size_t len)
+{
+	int rc = 0;
+	struct spi_transfer t = {
+		.tx_buf = buf,
+		.len    = len,
+	};
+	struct spi_message m;
+
+	if (!mdss_spi_client) {
+		pr_err("%s: spi client not available\n", __func__);
+		return -EINVAL;
+	}
+
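+	/* Pixel data is clocked out as 16-bit words (e.g. RGB565). */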
+	mdss_spi_client->bits_per_word = 16;
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	rc = spi_sync(mdss_spi_client, &m);
+
+	return rc;
+}
+
+static int mdss_spi_client_probe(struct spi_device *spidev)
+{
+	int irq;
+	int cs;
+	int cpha, cpol, cs_high;
+	u32 max_speed;
+	struct device_node *np;
+
+	irq = spidev->irq;
+	cs = spidev->chip_select;
+	cpha = (spidev->mode & SPI_CPHA) ? 1:0;
+	cpol = (spidev->mode & SPI_CPOL) ? 1:0;
+	cs_high = (spidev->mode & SPI_CS_HIGH) ? 1:0;
+	max_speed = spidev->max_speed_hz;
+	np = spidev->dev.of_node;
+	pr_debug("cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x] Max_speed[%d]\n",
+		cs, cpha, cpol, cs_high, max_speed);
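+
+	/* Cache the SPI device handle; all panel transfers reuse it. */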
+	mdss_spi_client = spidev;
+
+	return 0;
+}
+
+
+static const struct of_device_id mdss_spi_dt_match[] = {
+	{ .compatible = "qcom,mdss-spi-client" },
+	{},
+};
+
+static struct spi_driver mdss_spi_client_driver = {
+	.probe = mdss_spi_client_probe,
+	.driver = {
+		.name = "mdss-spi-client",
+		.owner  = THIS_MODULE,
+		.of_match_table = mdss_spi_dt_match,
+	},
+};
+
+static int __init mdss_spi_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&mdss_spi_client_driver);
+
+	return ret;
+}
+module_init(mdss_spi_init);
+
+static void __exit mdss_spi_exit(void)
+{
+	spi_unregister_driver(&mdss_spi_client_driver);
+}
+module_exit(mdss_spi_exit);
+
diff --git a/drivers/video/fbdev/msm/mdss_spi_client.h b/drivers/video/fbdev/msm/mdss_spi_client.h
new file mode 100644
index 0000000..2d15625
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_client.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_SPI_CLIENT_H__
+#define __MDSS_SPI_CLIENT_H__
+
+int mdss_spi_tx_command(const void *buf);
+int mdss_spi_tx_parameter(const void *buf, size_t len);
+int mdss_spi_tx_pixel(const void *buf, size_t len);
+int mdss_spi_read_data(u8 reg_addr, u8 *data, u8 len);
+#endif /* End of __MDSS_SPI_CLIENT_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.c b/drivers/video/fbdev/msm/mdss_spi_panel.c
new file mode 100644
index 0000000..86a1c1c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_panel.c
@@ -0,0 +1,1713 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/leds.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/of_device.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_spi_panel.h"
+#include "mdss_spi_client.h"
+#include "mdp3.h"
+
+DEFINE_LED_TRIGGER(bl_led_trigger);
+static int mdss_spi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+	struct spi_panel_data *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	int i, rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+		return rc;
+	}
+
+	if (!gpio_is_valid(ctrl_pdata->disp_dc_gpio)) {
+		pr_debug("%s:%d, dc line not configured\n",
+			   __func__, __LINE__);
+		return rc;
+	}
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+	pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	if (enable) {
+		rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+		if (rc) {
+			pr_err("display reset gpio request failed\n");
+			return rc;
+		}
+
+		rc = gpio_request(ctrl_pdata->disp_dc_gpio, "disp_dc");
+		if (rc) {
+			pr_err("display dc gpio request failed\n");
+			return rc;
+		}
+
+		if (!pinfo->cont_splash_enabled) {
+			for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
+				gpio_direction_output((ctrl_pdata->rst_gpio),
+					pdata->panel_info.rst_seq[i]);
+				if (pdata->panel_info.rst_seq[++i])
+					usleep_range(pinfo->rst_seq[i] * 1000,
+					pinfo->rst_seq[i] * 1000);
+			}
+		}
+
+		if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+			pr_debug("%s: Panel Not properly turned OFF\n",
+						__func__);
+			ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT;
+			pr_err("%s: Reset panel done\n", __func__);
+		}
+	} else {
+		gpio_direction_output((ctrl_pdata->rst_gpio), 0);
+		gpio_free(ctrl_pdata->rst_gpio);
+
+		gpio_direction_output(ctrl_pdata->disp_dc_gpio, 0);
+		gpio_free(ctrl_pdata->disp_dc_gpio);
+	}
+	return rc;
+}
+
+
+static int mdss_spi_panel_pinctrl_set_state(
+	struct spi_panel_data *ctrl_pdata,
+	bool active)
+{
+	struct pinctrl_state *pin_state;
+	int rc = -EFAULT;
+
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl))
+		return PTR_ERR(ctrl_pdata->pin_res.pinctrl);
+
+	pin_state = active ? ctrl_pdata->pin_res.gpio_state_active
+				: ctrl_pdata->pin_res.gpio_state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(ctrl_pdata->pin_res.pinctrl,
+				pin_state);
+		if (rc)
+			pr_err("%s: can not set %s pins\n", __func__,
+			       active ? MDSS_PINCTRL_STATE_DEFAULT
+			       : MDSS_PINCTRL_STATE_SLEEP);
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+		       active ? MDSS_PINCTRL_STATE_DEFAULT
+		       : MDSS_PINCTRL_STATE_SLEEP);
+	}
+	return rc;
+}
+
+
+static int mdss_spi_panel_pinctrl_init(struct platform_device *pdev)
+{
+	struct spi_panel_data *ctrl_pdata;
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	ctrl_pdata->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return PTR_ERR(ctrl_pdata->pin_res.pinctrl);
+	}
+
+	ctrl_pdata->pin_res.gpio_state_active
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_active))
+		pr_warn("%s: can not get default pinstate\n", __func__);
+
+	ctrl_pdata->pin_res.gpio_state_suspend
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_suspend))
+		pr_warn("%s: can not get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+
+static int mdss_spi_panel_power_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+	ret = msm_mdss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (ret) {
+		pr_err("%s: failed to enable vregs for %s\n",
+			__func__, "PANEL_PM");
+	}
+
+	/*
+	 * If continuous splash screen feature is enabled, then we need to
+	 * request all the GPIOs that have already been configured in the
+	 * bootloader. This needs to be done irrespective of whether
+	 * the lp11_init flag is set or not.
+	 */
+	if (pdata->panel_info.cont_splash_enabled) {
+		if (mdss_spi_panel_pinctrl_set_state(ctrl_pdata, true))
+			pr_debug("reset enable: pinctrl not enabled\n");
+
+		ret = mdss_spi_panel_reset(pdata, 1);
+		if (ret)
+			pr_err("%s: Panel reset failed. rc=%d\n",
+					__func__, ret);
+	}
+
+	return ret;
+}
+
+
+static int mdss_spi_panel_power_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	ret = mdss_spi_panel_reset(pdata, 0);
+	if (ret) {
+		pr_warn("%s: Panel reset failed. rc=%d\n", __func__, ret);
+		ret = 0;
+	}
+
+	if (mdss_spi_panel_pinctrl_set_state(ctrl_pdata, false))
+		pr_warn("reset disable: pinctrl not enabled\n");
+
+	ret = msm_mdss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 0);
+	if (ret)
+		pr_err("%s: failed to disable vregs for %s\n",
+			__func__, "PANEL_PM");
+
+end:
+	return ret;
+}
+
+
+static int mdss_spi_panel_power_ctrl(struct mdss_panel_data *pdata,
+	int power_state)
+{
+	int ret;
+	struct mdss_panel_info *pinfo;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	pr_debug("%s: cur_power_state=%d req_power_state=%d\n", __func__,
+		pinfo->panel_power_state, power_state);
+
+	if (pinfo->panel_power_state == power_state) {
+		pr_debug("%s: no change needed\n", __func__);
+		return 0;
+	}
+
+	switch (power_state) {
+	case MDSS_PANEL_POWER_OFF:
+		ret = mdss_spi_panel_power_off(pdata);
+		break;
+	case MDSS_PANEL_POWER_ON:
+		ret = mdss_spi_panel_power_on(pdata);
+		break;
+	default:
+		pr_err("%s: unknown panel power state requested (%d)\n",
+			__func__, power_state);
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		pinfo->panel_power_state = power_state;
+
+	return ret;
+}
+
+static int mdss_spi_panel_unblank(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (!(ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)) {
+		ret = ctrl_pdata->on(pdata);
+		if (ret) {
+			pr_err("%s: unable to initialize the panel\n",
+							__func__);
+			return ret;
+		}
+		ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
+	}
+
+	return ret;
+}
+
+static int mdss_spi_panel_blank(struct mdss_panel_data *pdata, int power_state)
+{
+	int ret = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+		ret = ctrl_pdata->off(pdata);
+		if (ret) {
+			pr_err("%s: Panel OFF failed\n", __func__);
+			return ret;
+		}
+		ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT;
+	}
+
+	return ret;
+}
+
+
+static int mdss_spi_panel_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+	int power_state;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	switch (event) {
+	case MDSS_EVENT_LINK_READY:
+		rc = mdss_spi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_ON);
+		if (rc) {
+			pr_err("%s:Panel power on failed. rc=%d\n",
+							__func__, rc);
+			return rc;
+		}
+		mdss_spi_panel_pinctrl_set_state(ctrl_pdata, true);
+		mdss_spi_panel_reset(pdata, 1);
+		break;
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_spi_panel_unblank(pdata);
+		break;
+	case MDSS_EVENT_PANEL_ON:
+		ctrl_pdata->ctrl_state |= CTRL_STATE_MDP_ACTIVE;
+		break;
+	case MDSS_EVENT_BLANK:
+		power_state = (int) (unsigned long) arg;
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		power_state = (int) (unsigned long) arg;
+		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
+		rc = mdss_spi_panel_blank(pdata, power_state);
+		rc = mdss_spi_panel_power_ctrl(pdata, power_state);
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	pr_debug("%s-:event=%d, rc=%d\n", __func__, event, rc);
+	return rc;
+}
+
+int is_spi_panel_continuous_splash_on(struct mdss_panel_data *pdata)
+{
+	int i = 0, voltage = 0;
+	struct mdss_vreg *vreg;
+	int num_vreg;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+			panel_data);
+	vreg = ctrl_pdata->panel_power_data.vreg_config;
+	num_vreg = ctrl_pdata->panel_power_data.num_vreg;
+
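+	/*
+	 * Treat the splash screen as active only if every panel supply is
+	 * already enabled and sitting inside its configured voltage window.
+	 */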
+	for (i = 0; i < num_vreg; i++) {
+		if (regulator_is_enabled(vreg[i].vreg) <= 0)
+			return false;
+		voltage = regulator_get_voltage(vreg[i].vreg);
+		if (!(voltage >= vreg[i].min_voltage &&
+			 voltage <= vreg[i].max_voltage))
+			return false;
+	}
+
+	return true;
+}
+
+static void enable_spi_panel_te_irq(struct spi_panel_data *ctrl_pdata,
+							bool enable)
+{
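+	/* Track the current IRQ state so enable/disable calls stay balanced. */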
+	static bool is_enabled = true;
+
+	if (is_enabled == enable)
+		return;
+
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio)) {
+		pr_err("%s:%d,SPI panel TE GPIO not configured\n",
+			   __func__, __LINE__);
+		return;
+	}
+
+	if (enable)
+		enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	else
+		disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+
+	is_enabled = enable;
+}
+
+int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata,
+			char *buf, int len, int dma_stride)
+{
+	struct spi_panel_data *ctrl_pdata = NULL;
+	char *tx_buf;
+	int rc = 0;
+	int panel_yres;
+	int panel_xres;
+	int padding_length = 0;
+	int actual_stride = 0;
+	int byte_per_pixel = 0;
+	int scan_count = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	tx_buf = ctrl_pdata->tx_buf;
+	panel_xres = ctrl_pdata->panel_data.panel_info.xres;
+	panel_yres = ctrl_pdata->panel_data.panel_info.yres;
+
+	byte_per_pixel = ctrl_pdata->panel_data.panel_info.bpp / 8;
+	actual_stride = panel_xres * byte_per_pixel;
+	padding_length = dma_stride - actual_stride;
+
+	/* strip the stride padding; pack lines into a contiguous buffer */
+	while (scan_count < panel_yres) {
+		memcpy((tx_buf + scan_count * actual_stride),
+			(buf + scan_count * (actual_stride + padding_length)),
+				actual_stride);
+		scan_count++;
+	}
+
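+	/*
+	 * Gate the frame push on the panel TE (tearing effect) signal so the
+	 * SPI transfer starts right after vsync instead of mid-scan.
+	 */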
+	enable_spi_panel_te_irq(ctrl_pdata, true);
+
+	mutex_lock(&ctrl_pdata->spi_tx_mutex);
+	reinit_completion(&ctrl_pdata->spi_panel_te);
+
+	rc = wait_for_completion_timeout(&ctrl_pdata->spi_panel_te,
+				   msecs_to_jiffies(SPI_PANEL_TE_TIMEOUT));
+
+	if (rc == 0)
+		pr_err("wait panel TE time out\n");
+
+	rc = mdss_spi_tx_pixel(tx_buf, ctrl_pdata->byte_pre_frame);
+	mutex_unlock(&ctrl_pdata->spi_tx_mutex);
+
+	return rc;
+}
+
+static int mdss_spi_read_panel_data(struct mdss_panel_data *pdata,
+		u8 reg_addr, u8 *data, u8 len)
+{
+	int rc = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+		panel_data);
+
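+	/* Drive DC low for register access, then back high for pixel data. */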
+	mutex_lock(&ctrl_pdata->spi_tx_mutex);
+	gpio_direction_output(ctrl_pdata->disp_dc_gpio, 0);
+	rc = mdss_spi_read_data(reg_addr, data, len);
+	gpio_direction_output(ctrl_pdata->disp_dc_gpio, 1);
+	mutex_unlock(&ctrl_pdata->spi_tx_mutex);
+
+	return rc;
+}
+
+static int mdss_spi_panel_on(struct mdss_panel_data *pdata)
+{
+	struct spi_panel_data *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+	int i;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	for (i = 0; i < ctrl->on_cmds.cmd_cnt; i++) {
+		/* pull the DC GPIO low to mark the next byte as a command */
+		gpio_direction_output(ctrl->disp_dc_gpio, 0);
+		mdss_spi_tx_command(ctrl->on_cmds.cmds[i].command);
+		gpio_direction_output((ctrl->disp_dc_gpio), 1);
+
+		if (ctrl->on_cmds.cmds[i].dchdr.dlen > 1) {
+			mdss_spi_tx_parameter(ctrl->on_cmds.cmds[i].parameter,
+					ctrl->on_cmds.cmds[i].dchdr.dlen-1);
+		}
+		if (ctrl->on_cmds.cmds[i].dchdr.wait != 0)
+			msleep(ctrl->on_cmds.cmds[i].dchdr.wait);
+	}
+
+	pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
+
+	pr_debug("%s:-\n", __func__);
+
+	return 0;
+}
+
+
+static int mdss_spi_panel_off(struct mdss_panel_data *pdata)
+{
+	struct spi_panel_data *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+	int i;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	for (i = 0; i < ctrl->off_cmds.cmd_cnt; i++) {
+		/* pull the DC GPIO low to mark the next byte as a command */
+		gpio_direction_output(ctrl->disp_dc_gpio, 0);
+		mdss_spi_tx_command(ctrl->off_cmds.cmds[i].command);
+		gpio_direction_output((ctrl->disp_dc_gpio), 1);
+
+		if (ctrl->off_cmds.cmds[i].dchdr.dlen > 1) {
+			mdss_spi_tx_parameter(ctrl->off_cmds.cmds[i].parameter,
+					ctrl->off_cmds.cmds[i].dchdr.dlen-1);
+		}
+
+		if (ctrl->off_cmds.cmds[i].dchdr.wait != 0)
+			msleep(ctrl->off_cmds.cmds[i].dchdr.wait);
+	}
+
+	pinfo->blank_state = MDSS_PANEL_BLANK_BLANK;
+
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+static void mdss_spi_put_dt_vreg_data(struct device *dev,
+	struct mdss_module_power *module_power)
+{
+	if (!module_power) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->vreg_config) {
+		devm_kfree(dev, module_power->vreg_config);
+		module_power->vreg_config = NULL;
+	}
+	module_power->num_vreg = 0;
+}
+
+
+static int mdss_spi_get_panel_vreg_data(struct device *dev,
+			struct mdss_module_power *mp)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_node = NULL;
+	struct device_node *supply_root_node = NULL;
+
+	if (!dev || !mp) {
+		pr_err("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	of_node = dev->of_node;
+
+	mp->num_vreg = 0;
+
+	supply_root_node = of_get_child_by_name(of_node,
+				"qcom,panel-supply-entries");
+
+	for_each_available_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+	if (mp->num_vreg == 0) {
+		pr_debug("%s: no vreg\n", __func__);
+		goto novreg;
+	} else {
+		pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg);
+	}
+
+	mp->vreg_config = kcalloc(mp->num_vreg, sizeof(struct mdss_vreg),
+				GFP_KERNEL);
+
+	if (mp->vreg_config != NULL) {
+		for_each_available_child_of_node(supply_root_node,
+				supply_node) {
+			const char *st = NULL;
+			/* vreg-name */
+			rc = of_property_read_string(supply_node,
+				"qcom,supply-name", &st);
+			if (rc) {
+				pr_err("%s: error reading name. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			snprintf(mp->vreg_config[i].vreg_name,
+				ARRAY_SIZE((mp->vreg_config[i].vreg_name)),
+					"%s", st);
+			/* vreg-min-voltage */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-min-voltage", &tmp);
+			if (rc) {
+				pr_err("%s: error reading min volt. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].min_voltage = tmp;
+
+			/* vreg-max-voltage */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-max-voltage", &tmp);
+			if (rc) {
+				pr_err("%s: error reading max volt. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].max_voltage = tmp;
+
+			/* enable-load */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-enable-load", &tmp);
+			if (rc) {
+				pr_err("%s: error read enable load. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+			/* disable-load */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-disable-load", &tmp);
+			if (rc) {
+				pr_err("%s: error read disable load. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+			/* pre-sleep */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-pre-on-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read pre on value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].pre_on_sleep = tmp;
+			}
+
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-pre-off-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read pre off value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].pre_off_sleep = tmp;
+			}
+
+			/* post-sleep */
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-post-on-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read post on value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].post_on_sleep = tmp;
+			}
+
+			rc = of_property_read_u32(supply_node,
+				"qcom,supply-post-off-sleep", &tmp);
+			if (rc) {
+				pr_debug("%s: error read post off value\n",
+						__func__);
+				rc = 0;
+			} else {
+				mp->vreg_config[i].post_off_sleep = tmp;
+			}
+
+			++i;
+		}
+	}
+	return rc;
+error:
+	kfree(mp->vreg_config);
+	mp->vreg_config = NULL;
+
+novreg:
+	mp->num_vreg = 0;
+
+	return rc;
+
+}
+
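+/*
+ * Parse a packed command stream from DT: each entry is a two byte
+ * spi_ctrl_hdr (post-command wait in ms, payload length) followed by
+ * dlen payload bytes, where the first byte is the command and the
+ * remainder are its parameters.
+ */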
+static int mdss_spi_panel_parse_cmds(struct device_node *np,
+		struct spi_panel_cmds *pcmds, char *cmd_key)
+{
+	const char *data;
+	int blen = 0, len;
+	char *buf, *bp;
+	struct spi_ctrl_hdr *dchdr;
+	int i, cnt;
+
+	data = of_get_property(np, cmd_key, &blen);
+	if (!data) {
+		pr_err("%s: failed, key=%s\n", __func__, cmd_key);
+		return -ENOMEM;
+	}
+
+	buf = kcalloc(blen, sizeof(char), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf, data, blen);
+
+	/* scan dcs commands */
+	bp = buf;
+	len = blen;
+	cnt = 0;
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct spi_ctrl_hdr *)bp;
+		if (dchdr->dlen > len) {
+			pr_err("%s: dtsi parse error, len=%d",
+				__func__, dchdr->dlen);
+			goto exit_free;
+		}
+		bp += sizeof(*dchdr);
+		len -= sizeof(*dchdr);
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+		cnt++;
+	}
+
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error",
+				__func__, buf[0], len);
+		goto exit_free;
+	}
+
+	pcmds->cmds = kcalloc(cnt, sizeof(struct spi_cmd_desc),
+						GFP_KERNEL);
+	if (!pcmds->cmds)
+		goto exit_free;
+
+	pcmds->cmd_cnt = cnt;
+	pcmds->buf = buf;
+	pcmds->blen = blen;
+
+	bp = buf;
+	len = blen;
+	for (i = 0; i < cnt; i++) {
+		dchdr = (struct spi_ctrl_hdr *)bp;
+		len -= sizeof(*dchdr);
+		bp += sizeof(*dchdr);
+		pcmds->cmds[i].dchdr = *dchdr;
+		pcmds->cmds[i].command = bp;
+		pcmds->cmds[i].parameter = bp + sizeof(char);
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+
+	pr_debug("%s: dcs_cmd=%x, len=%d, cmd_cnt=%d\n", __func__,
+		pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt);
+
+	return 0;
+
+exit_free:
+	kfree(buf);
+	return -ENOMEM;
+}
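+
+/*
+ * The reset sequence property is a list of <gpio-level, delay-ms> pairs,
+ * so it must hold an even number of entries, at most MDSS_SPI_RST_SEQ_LEN.
+ */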
+static int mdss_spi_panel_parse_reset_seq(struct device_node *np,
+		u32 rst_seq[MDSS_SPI_RST_SEQ_LEN], u32 *rst_len,
+		const char *name)
+{
+	int num = 0, i;
+	int rc;
+	struct property *data;
+	u32 tmp[MDSS_SPI_RST_SEQ_LEN];
+
+	*rst_len = 0;
+	data = of_find_property(np, name, &num);
+	num /= sizeof(u32);
+	if (!data || !num || num > MDSS_SPI_RST_SEQ_LEN || num % 2) {
+		pr_err("%s:%d, error reading %s, length found = %d\n",
+			__func__, __LINE__, name, num);
+	} else {
+		rc = of_property_read_u32_array(np, name, tmp, num);
+		if (rc)
+			pr_err("%s:%d, error reading %s, rc = %d\n",
+				__func__, __LINE__, name, rc);
+		else {
+			for (i = 0; i < num; ++i)
+				rst_seq[i] = tmp[i];
+			*rst_len = num;
+		}
+	}
+	return 0;
+}
+
+static bool mdss_send_panel_cmd_for_esd(struct spi_panel_data *ctrl_pdata)
+{
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return false;
+	}
+
+	mutex_lock(&ctrl_pdata->spi_tx_mutex);
+	mdss_spi_panel_on(&ctrl_pdata->panel_data);
+	mutex_unlock(&ctrl_pdata->spi_tx_mutex);
+
+	return true;
+}
+
+static bool mdss_spi_reg_status_check(struct spi_panel_data *ctrl_pdata)
+{
+	int ret = 0;
+	int i = 0;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return false;
+	}
+
+	pr_debug("%s: Checking Register status\n", __func__);
+
+	ret = mdss_spi_read_panel_data(&ctrl_pdata->panel_data,
+					ctrl_pdata->panel_status_reg,
+					ctrl_pdata->act_status_value,
+					ctrl_pdata->status_cmds_rlen);
+	if (ret < 0) {
+		pr_err("%s: Read status register returned error\n", __func__);
+	} else {
+		for (i = 0; i < ctrl_pdata->status_cmds_rlen; i++) {
+			pr_debug("act_value[%d] = %x, exp_value[%d] = %x\n",
+					i, ctrl_pdata->act_status_value[i],
+					i, ctrl_pdata->exp_status_value[i]);
+			if (ctrl_pdata->act_status_value[i] !=
+					ctrl_pdata->exp_status_value[i])
+				return false;
+		}
+	}
+
+	return true;
+}
+
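+/*
+ * Parse the optional ESD (panel status check) properties. Two check modes
+ * are supported: reading a status register back and comparing it against
+ * expected values, or re-sending the panel init command sequence.
+ */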
+static void mdss_spi_parse_esd_params(struct device_node *np,
+		struct spi_panel_data	*ctrl)
+{
+	u32 tmp;
+	int rc;
+	struct property *data;
+	const char *string;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	pinfo->esd_check_enabled = of_property_read_bool(np,
+		"qcom,esd-check-enabled");
+
+	if (!pinfo->esd_check_enabled)
+		return;
+
+	ctrl->status_mode = SPI_ESD_MAX;
+
+	rc = of_property_read_string(np,
+			"qcom,mdss-spi-panel-status-check-mode", &string);
+	if (!rc) {
+		if (!strcmp(string, "reg_read")) {
+			ctrl->status_mode = SPI_ESD_REG;
+			ctrl->check_status =
+				mdss_spi_reg_status_check;
+		} else if (!strcmp(string, "send_init_command")) {
+			ctrl->status_mode = SPI_SEND_PANEL_COMMAND;
+			ctrl->check_status =
+				mdss_send_panel_cmd_for_esd;
+			return;
+		} else {
+			pr_err("No valid panel-status-check-mode string\n");
+			pinfo->esd_check_enabled = false;
+			return;
+		}
+	}
+
+	rc = of_property_read_u8(np, "qcom,mdss-spi-panel-status-reg",
+				&ctrl->panel_status_reg);
+	if (rc) {
+		pr_warn("%s:%d, Read status reg failed, disable ESD check\n",
+				__func__, __LINE__);
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-status-read-length",
+					&tmp);
+	if (rc) {
+		pr_warn("%s:%d, Read reg length failed, disable ESD check\n",
+				__func__, __LINE__);
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	ctrl->status_cmds_rlen = (!rc ? tmp : 1);
+
+	ctrl->exp_status_value = kzalloc(sizeof(u8) *
+				 (ctrl->status_cmds_rlen + 1), GFP_KERNEL);
+	ctrl->act_status_value = kzalloc(sizeof(u8) *
+				(ctrl->status_cmds_rlen + 1), GFP_KERNEL);
+
+	if (!ctrl->exp_status_value || !ctrl->act_status_value) {
+		pr_err("%s: Error allocating memory for status buffer\n",
+						__func__);
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	data = of_find_property(np, "qcom,mdss-spi-panel-status-value", &tmp);
+	tmp /= sizeof(u8);
+	if (!data || (tmp != ctrl->status_cmds_rlen)) {
+		pr_err("%s: Panel status values not found\n", __func__);
+		pinfo->esd_check_enabled = false;
+		memset(ctrl->exp_status_value, 0, ctrl->status_cmds_rlen);
+	} else {
+		rc = of_property_read_u8_array(np,
+			"qcom,mdss-spi-panel-status-value",
+			ctrl->exp_status_value, tmp);
+		if (rc) {
+			pr_err("%s: Error reading panel status values\n",
+				__func__);
+			pinfo->esd_check_enabled = false;
+			memset(ctrl->exp_status_value, 0,
+				ctrl->status_cmds_rlen);
+		}
+	}
+}
+
+static int mdss_spi_panel_parse_dt(struct device_node *np,
+		struct spi_panel_data	*ctrl_pdata)
+{
+	u32 tmp;
+	int rc;
+	const char *data;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	pinfo->cont_splash_enabled = of_property_read_bool(np,
+					"qcom,cont-splash-enabled");
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-width", &tmp);
+	if (rc) {
+		pr_err("%s: panel width not specified\n", __func__);
+		return -EINVAL;
+	}
+	pinfo->xres = (!rc ? tmp : 240);
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-height", &tmp);
+	if (rc) {
+		pr_err("%s:panel height not specified\n", __func__);
+		return -EINVAL;
+	}
+	pinfo->yres = (!rc ? tmp : 320);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-width-dimension", &tmp);
+	pinfo->physical_width = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-height-dimension", &tmp);
+	pinfo->physical_height = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-panel-framerate", &tmp);
+	pinfo->spi.frame_rate = (!rc ? tmp : 30);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-front-porch", &tmp);
+	pinfo->lcdc.h_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-back-porch", &tmp);
+	pinfo->lcdc.h_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-pulse-width", &tmp);
+	pinfo->lcdc.h_pulse_width = (!rc ? tmp : 2);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-h-sync-skew", &tmp);
+	pinfo->lcdc.hsync_skew = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-v-back-porch", &tmp);
+	pinfo->lcdc.v_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-v-front-porch", &tmp);
+	pinfo->lcdc.v_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-v-pulse-width", &tmp);
+	pinfo->lcdc.v_pulse_width = (!rc ? tmp : 2);
+
+
+	rc = of_property_read_u32(np, "qcom,mdss-spi-bpp", &tmp);
+	if (rc) {
+		pr_err("%s: bpp not specified\n", __func__);
+		return -EINVAL;
+	}
+	pinfo->bpp = (!rc ? tmp : 16);
+
+	pinfo->pdest = DISPLAY_1;
+
+	ctrl_pdata->bklt_ctrl = SPI_UNKNOWN_CTRL;
+	data = of_get_property(np, "qcom,mdss-spi-bl-pmic-control-type", NULL);
+	if (data) {
+		if (!strcmp(data, "bl_ctrl_wled")) {
+			led_trigger_register_simple("bkl-trigger",
+				&bl_led_trigger);
+			pr_debug("%s: SUCCESS-> WLED TRIGGER register\n",
+				__func__);
+			ctrl_pdata->bklt_ctrl = SPI_BL_WLED;
+		} else if (!strcmp(data, "bl_gpio_pulse")) {
+			led_trigger_register_simple("gpio-bklt-trigger",
+				&bl_led_trigger);
+			pr_debug("%s: SUCCESS-> GPIO PULSE TRIGGER register\n",
+				__func__);
+			ctrl_pdata->bklt_ctrl = SPI_BL_WLED;
+		} else if (!strcmp(data, "bl_ctrl_pwm")) {
+			ctrl_pdata->bklt_ctrl = SPI_BL_PWM;
+			ctrl_pdata->pwm_pmi = of_property_read_bool(np,
+					"qcom,mdss-spi-bl-pwm-pmi");
+			rc = of_property_read_u32(np,
+				"qcom,mdss-spi-bl-pmic-pwm-frequency", &tmp);
+			if (rc) {
+				pr_err("%s: Error, panel pwm_period\n",
+					 __func__);
+				return -EINVAL;
+			}
+			ctrl_pdata->pwm_period = tmp;
+			if (ctrl_pdata->pwm_pmi) {
+				ctrl_pdata->pwm_bl = of_pwm_get(np, NULL);
+				if (IS_ERR(ctrl_pdata->pwm_bl)) {
+					pr_err("%s: Error, pwm device\n",
+							__func__);
+					ctrl_pdata->pwm_bl = NULL;
+					return -EINVAL;
+				}
+			} else {
+				rc = of_property_read_u32(np,
+					"qcom,mdss-spi-bl-pmic-bank-select",
+								 &tmp);
+				if (rc) {
+					pr_err("%s: Error, lpg channel\n",
+						__func__);
+					return -EINVAL;
+				}
+				ctrl_pdata->pwm_lpg_chan = tmp;
+				tmp = of_get_named_gpio(np,
+					"qcom,mdss-spi-pwm-gpio", 0);
+				ctrl_pdata->pwm_pmic_gpio = tmp;
+				pr_debug("%s: Configured PWM bklt ctrl\n",
+								 __func__);
+			}
+		}
+	}
+	rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp);
+	pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-bl-min-level", &tmp);
+	pinfo->bl_min = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-spi-bl-max-level", &tmp);
+	pinfo->bl_max = (!rc ? tmp : 255);
+	ctrl_pdata->bklt_max = pinfo->bl_max;
+
+
+	mdss_spi_panel_parse_reset_seq(np, pinfo->rst_seq,
+					&(pinfo->rst_seq_len),
+					"qcom,mdss-spi-reset-sequence");
+
+	mdss_spi_panel_parse_cmds(np, &ctrl_pdata->on_cmds,
+		"qcom,mdss-spi-on-command");
+
+	mdss_spi_panel_parse_cmds(np, &ctrl_pdata->off_cmds,
+		"qcom,mdss-spi-off-command");
+
+	mdss_spi_parse_esd_params(np, ctrl_pdata);
+
+
+	return 0;
+}
+
+static void mdss_spi_panel_pwm_cfg(struct spi_panel_data *ctrl)
+{
+	if (ctrl->pwm_pmi)
+		return;
+
+	ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt");
+	if (ctrl->pwm_bl == NULL || IS_ERR(ctrl->pwm_bl)) {
+		pr_err("%s: Error: lpg_chan=%d pwm request failed",
+				__func__, ctrl->pwm_lpg_chan);
+	}
+	ctrl->pwm_enabled = 0;
+}
+
+static void mdss_spi_panel_bklt_pwm(struct spi_panel_data *ctrl, int level)
+{
+	int ret;
+	u32 duty;
+	u32 period_ns;
+
+	if (ctrl->pwm_bl == NULL) {
+		pr_err("%s: no PWM\n", __func__);
+		return;
+	}
+
+	if (level == 0) {
+		if (ctrl->pwm_enabled) {
+			ret = pwm_config_us(ctrl->pwm_bl, level,
+					ctrl->pwm_period);
+			if (ret)
+				pr_err("%s: pwm_config_us() failed err=%d.\n",
+						__func__, ret);
+			pwm_disable(ctrl->pwm_bl);
+		}
+		ctrl->pwm_enabled = 0;
+		return;
+	}
+
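+	/* Scale the requested level into a duty cycle within pwm_period. */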
+	duty = level * ctrl->pwm_period;
+	duty /= ctrl->bklt_max;
+
+	pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n",
+			__func__, ctrl->bklt_ctrl, ctrl->pwm_period,
+				ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan);
+
+	if (ctrl->pwm_period >= USEC_PER_SEC) {
+		ret = pwm_config_us(ctrl->pwm_bl, duty, ctrl->pwm_period);
+		if (ret) {
+			pr_err("%s: pwm_config_us() failed err=%d\n",
+					__func__, ret);
+			return;
+		}
+	} else {
+		period_ns = ctrl->pwm_period * NSEC_PER_USEC;
+		ret = pwm_config(ctrl->pwm_bl,
+				level * period_ns / ctrl->bklt_max,
+				period_ns);
+		if (ret) {
+			pr_err("%s: pwm_config() failed err=%d\n",
+					__func__, ret);
+			return;
+		}
+	}
+
+	if (!ctrl->pwm_enabled) {
+		ret = pwm_enable(ctrl->pwm_bl);
+		if (ret)
+			pr_err("%s: pwm_enable() failed err=%d\n", __func__,
+				ret);
+		ctrl->pwm_enabled = 1;
+	}
+}
+
+static void mdss_spi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+							u32 bl_level)
+{
+	if (bl_level) {
+		mdp3_res->bklt_level = bl_level;
+		mdp3_res->bklt_update = true;
+	} else {
+		mdss_spi_panel_bl_ctrl_update(pdata, bl_level);
+	}
+}
+
+#if defined(CONFIG_FB_MSM_MDSS_SPI_PANEL) && defined(CONFIG_SPI_QUP)
+void mdss_spi_panel_bl_ctrl_update(struct mdss_panel_data *pdata,
+							u32 bl_level)
+{
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
+		bl_level = pdata->panel_info.bl_min;
+
+	switch (ctrl_pdata->bklt_ctrl) {
+	case SPI_BL_WLED:
+		led_trigger_event(bl_led_trigger, bl_level);
+		break;
+	case SPI_BL_PWM:
+		mdss_spi_panel_bklt_pwm(ctrl_pdata, bl_level);
+		break;
+	default:
+		pr_err("%s: Unknown bl_ctrl configuration %d\n",
+			__func__, ctrl_pdata->bklt_ctrl);
+		break;
+	}
+}
+#endif
+
+static int mdss_spi_panel_init(struct device_node *node,
+	struct spi_panel_data	*ctrl_pdata,
+	bool cmd_cfg_cont_splash)
+{
+	int rc = 0;
+	static const char *panel_name;
+	struct mdss_panel_info *pinfo;
+
+	if (!node || !ctrl_pdata) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+	pinfo->panel_name[0] = '\0';
+	panel_name = of_get_property(node, "qcom,mdss-spi-panel-name", NULL);
+	if (!panel_name) {
+		pr_info("%s:%d, Panel name not specified\n",
+						__func__, __LINE__);
+	} else {
+		pr_debug("%s: Panel Name = %s\n", __func__, panel_name);
+		strlcpy(&pinfo->panel_name[0], panel_name, MDSS_MAX_PANEL_LEN);
+	}
+	rc = mdss_spi_panel_parse_dt(node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s:%d panel dt parse failed\n", __func__, __LINE__);
+		return rc;
+	}
+
+	ctrl_pdata->byte_pre_frame = pinfo->xres * pinfo->yres * pinfo->bpp/8;
+
+	ctrl_pdata->tx_buf = kmalloc(ctrl_pdata->byte_pre_frame, GFP_KERNEL);
+	if (!ctrl_pdata->tx_buf)
+		return -ENOMEM;
+
+	if (!cmd_cfg_cont_splash)
+		pinfo->cont_splash_enabled = false;
+
+	pr_info("%s: Continuous splash %s\n", __func__,
+		pinfo->cont_splash_enabled ? "enabled" : "disabled");
+
+	pinfo->dynamic_switch_pending = false;
+	pinfo->is_lpm_mode = false;
+	pinfo->esd_rdy = false;
+
+	ctrl_pdata->on = mdss_spi_panel_on;
+	ctrl_pdata->off = mdss_spi_panel_off;
+	ctrl_pdata->panel_data.set_backlight = mdss_spi_panel_bl_ctrl;
+
+	return 0;
+}
+
+static int mdss_spi_get_panel_cfg(char *panel_cfg,
+				struct spi_panel_data	*ctrl_pdata)
+{
+	int rc;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+
+	if (!ctrl_pdata)
+		return MDSS_PANEL_INTF_INVALID;
+
+	pan_cfg = ctrl_pdata->mdss_util->panel_intf_type(MDSS_PANEL_INTF_SPI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (!pan_cfg) {
+		panel_cfg[0] = 0;
+		return 0;
+	}
+
+	pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__,
+		 pan_cfg->arg_cfg);
+	ctrl_pdata->panel_data.panel_info.is_prim_panel = true;
+	rc = strlcpy(panel_cfg, pan_cfg->arg_cfg,
+		     sizeof(pan_cfg->arg_cfg));
+	return rc;
+}
+
+static int mdss_spi_panel_regulator_init(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (!pdev) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		pr_err("%s: invalid driver data\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_mdss_config_vreg(&pdev->dev,
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (rc)
+		pr_err("%s: failed to init vregs for %s\n",
+			__func__, "PANEL_PM");
+
+	return rc;
+
+}
+
+static irqreturn_t spi_panel_te_handler(int irq, void *data)
+{
+	struct spi_panel_data *ctrl_pdata = (struct spi_panel_data *)data;
+	static int count = 2;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: SPI display not available\n", __func__);
+		return IRQ_HANDLED;
+	}
+	complete(&ctrl_pdata->spi_panel_te);
+
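+	/* Report a vsync to the registered client on every second TE pulse. */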
+	if (ctrl_pdata->vsync_client.handler && !(--count)) {
+		ctrl_pdata->vsync_client.handler(ctrl_pdata->vsync_client.arg);
+		count = 2;
+	}
+
+	return IRQ_HANDLED;
+}
+
+void mdp3_spi_vsync_enable(struct mdss_panel_data *pdata,
+				struct mdp3_notification *vsync_client)
+{
+	int updated = 0;
+	struct spi_panel_data *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct spi_panel_data,
+				panel_data);
+
+	if (vsync_client) {
+		if (ctrl_pdata->vsync_client.handler != vsync_client->handler) {
+			ctrl_pdata->vsync_client = *vsync_client;
+			updated = 1;
+		}
+	} else {
+		if (ctrl_pdata->vsync_client.handler) {
+			ctrl_pdata->vsync_client.handler = NULL;
+			ctrl_pdata->vsync_client.arg = NULL;
+			updated = 1;
+		}
+	}
+
+	if (updated) {
+		if (vsync_client && vsync_client->handler)
+			enable_spi_panel_te_irq(ctrl_pdata, true);
+		else
+			enable_spi_panel_te_irq(ctrl_pdata, false);
+	}
+}
+
+static struct device_node *mdss_spi_pref_prim_panel(
+		struct platform_device *pdev)
+{
+	struct device_node *spi_pan_node = NULL;
+
+	pr_debug("%s:%d: Select primary panel from dt\n",
+					__func__, __LINE__);
+	spi_pan_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,spi-pref-prim-pan", 0);
+	if (!spi_pan_node)
+		pr_err("%s:can't find panel phandle\n", __func__);
+
+	return spi_pan_node;
+}
+
+static int spi_panel_device_register(struct device_node *pan_node,
+				struct spi_panel_data *ctrl_pdata)
+{
+	int rc;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	struct device_node *spi_ctrl_np = NULL;
+	struct platform_device *ctrl_pdev = NULL;
+
+	pinfo->type = SPI_PANEL;
+
+	spi_ctrl_np = of_parse_phandle(pan_node,
+				"qcom,mdss-spi-panel-controller", 0);
+	if (!spi_ctrl_np) {
+		pr_err("%s: SPI controller node not initialized\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	ctrl_pdev = of_find_device_by_node(spi_ctrl_np);
+	if (!ctrl_pdev) {
+		of_node_put(spi_ctrl_np);
+		pr_err("%s: SPI controller node not find\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	rc = mdss_spi_panel_regulator_init(ctrl_pdev);
+	if (rc) {
+		pr_err("%s: failed to init regulator, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo,
+		FPS_RESOLUTION_HZ);
+	pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
+
+	ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-te-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio))
+		pr_err("%s:%d, TE gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->disp_dc_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-spi-dc-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->disp_dc_gpio))
+		pr_err("%s:%d, SPI DC gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+			 "qcom,platform-reset-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
+		pr_err("%s:%d, reset gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->panel_data.event_handler = mdss_spi_panel_event_handler;
+
+	if (ctrl_pdata->bklt_ctrl == SPI_BL_PWM)
+		mdss_spi_panel_pwm_cfg(ctrl_pdata);
+
+	ctrl_pdata->ctrl_state = CTRL_STATE_UNKNOWN;
+
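+	/*
+	 * If the bootloader left the panel on (continuous splash), power the
+	 * rails and mark the panel as already initialized so the first
+	 * unblank does not re-run the power-on sequence.
+	 */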
+	if (pinfo->cont_splash_enabled) {
+		rc = mdss_spi_panel_power_ctrl(&(ctrl_pdata->panel_data),
+			MDSS_PANEL_POWER_ON);
+		if (rc) {
+			pr_err("%s: Panel power on failed\n", __func__);
+			return rc;
+		}
+		if (ctrl_pdata->bklt_ctrl == SPI_BL_PWM)
+			ctrl_pdata->pwm_enabled = 1;
+		pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
+		ctrl_pdata->ctrl_state |=
+			(CTRL_STATE_PANEL_INIT | CTRL_STATE_MDP_ACTIVE);
+	} else {
+		pinfo->panel_power_state = MDSS_PANEL_POWER_OFF;
+	}
+
+	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
+	if (rc) {
+		pr_err("%s: unable to register SPI panel\n", __func__);
+		return rc;
+	}
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+
+/**
+ * mdss_spi_find_panel_of_node(): find device node of spi panel
+ * @pdev: platform_device of the spi ctrl node
+ * @panel_cfg: string containing intf specific config data
+ *
+ * Function finds the panel device node using the interface
+ * specific configuration data, which could be derived from the
+ * result of the bootloader's GCDB panel detection mechanism. If
+ * no such config data exists, this function returns the default
+ * panel configured in the device tree.
+ *
+ * returns pointer to panel node on success, NULL on error.
+ */
+static struct device_node *mdss_spi_find_panel_of_node(
+		struct platform_device *pdev, char *panel_cfg)
+{
+	int len, i;
+	int ctrl_id = pdev->id - 1;
+	char panel_name[MDSS_MAX_PANEL_LEN] = "";
+	char ctrl_id_stream[3] =  "0:";
+	char *stream = NULL, *pan = NULL;
+	struct device_node *spi_pan_node = NULL, *mdss_node = NULL;
+
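+	/* panel_cfg is expected to look like "<ctrl-id>:<panel-name>[:...]" */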
+	len = strlen(panel_cfg);
+	if (!len) {
+		/* no panel cfg chg, parse dt */
+		pr_err("%s:%d: no cmd line cfg present\n",
+			 __func__, __LINE__);
+		goto end;
+	} else {
+		if (ctrl_id == 1)
+			strlcpy(ctrl_id_stream, "1:", 3);
+
+		stream = strnstr(panel_cfg, ctrl_id_stream, len);
+		if (!stream) {
+			pr_err("controller config is not present\n");
+			goto end;
+		}
+		stream += 2;
+
+		pan = strnchr(stream, strlen(stream), ':');
+		if (!pan) {
+			strlcpy(panel_name, stream, MDSS_MAX_PANEL_LEN);
+		} else {
+			for (i = 0; (stream + i) < pan; i++)
+				panel_name[i] = *(stream + i);
+			panel_name[i] = 0;
+		}
+
+		pr_debug("%s:%d:%s:%s\n", __func__, __LINE__,
+			 panel_cfg, panel_name);
+
+		mdss_node = of_parse_phandle(pdev->dev.of_node,
+					     "qcom,mdss-mdp", 0);
+		if (!mdss_node) {
+			pr_err("%s: %d: mdss_node null\n",
+			       __func__, __LINE__);
+			return NULL;
+		}
+
+		spi_pan_node = of_find_node_by_name(mdss_node,
+						    panel_name);
+		if (!spi_pan_node) {
+			pr_err("%s: invalid pan node, selecting prim panel\n",
+			       __func__);
+			goto end;
+		}
+		return spi_pan_node;
+	}
+end:
+	if (strcmp(panel_name, NONE_PANEL))
+		spi_pan_node = mdss_spi_pref_prim_panel(pdev);
+	of_node_put(mdss_node);
+	return spi_pan_node;
+}
+
+
+static int mdss_spi_panel_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct spi_panel_data	*ctrl_pdata;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+	struct device_node *spi_pan_node = NULL;
+	bool cmd_cfg_cont_splash = true;
+	char panel_cfg[MDSS_MAX_PANEL_LEN];
+	struct mdss_util_intf *util;
+	const char *ctrl_name;
+
+	util = mdss_get_util_intf();
+	if (util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	if (!util->mdp_probe_done) {
+		pr_err("%s: MDP not probed yet\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!pdev->dev.of_node) {
+		pr_err("SPI driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR(pan_cfg)) {
+		pr_err("%s: return MDSS_PANEL_INTF_DSI\n", __func__);
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		pr_err("%s: DSI is primary\n", __func__);
+		return -ENODEV;
+	}
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		ctrl_pdata = devm_kzalloc(&pdev->dev,
+					  sizeof(struct spi_panel_data),
+					  GFP_KERNEL);
+		if (!ctrl_pdata) {
+			pr_err("%s: FAILED: cannot alloc spi panel\n",
+			       __func__);
+			rc = -ENOMEM;
+			goto error_no_mem;
+		}
+		platform_set_drvdata(pdev, ctrl_pdata);
+	}
+
+	ctrl_pdata->mdss_util = util;
+
+	ctrl_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!ctrl_name)
+		pr_info("%s:%d, Ctrl name not specified\n",
+			__func__, __LINE__);
+	else
+		pr_debug("%s: Ctrl name = %s\n",
+			__func__, ctrl_name);
+
+
+	rc = of_platform_populate(pdev->dev.of_node,
+				  NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+		goto error_no_mem;
+	}
+
+	rc = mdss_spi_panel_pinctrl_init(pdev);
+	if (rc)
+		pr_warn("%s: failed to get pin resources\n", __func__);
+
+	rc = mdss_spi_get_panel_vreg_data(&pdev->dev,
+					&ctrl_pdata->panel_power_data);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: failed to get panel vreg data, rc=%d\n",
+			__func__, rc);
+		goto error_vreg;
+	}
+
+	/* SPI panels can be different between controllers */
+	rc = mdss_spi_get_panel_cfg(panel_cfg, ctrl_pdata);
+	if (!rc)
+		/* spi panel cfg not present */
+		pr_warn("%s:%d:spi specific cfg not present\n",
+			__func__, __LINE__);
+
+	/* find panel device node */
+	spi_pan_node = mdss_spi_find_panel_of_node(pdev, panel_cfg);
+	if (!spi_pan_node) {
+		pr_err("%s: can't find panel node %s\n", __func__, panel_cfg);
+		goto error_pan_node;
+	}
+
+	cmd_cfg_cont_splash = true;
+
+	rc = mdss_spi_panel_init(spi_pan_node, ctrl_pdata, cmd_cfg_cont_splash);
+	if (rc) {
+		pr_err("%s: spi panel init failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	rc = spi_panel_device_register(spi_pan_node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: spi panel dev reg failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	ctrl_pdata->panel_data.event_handler = mdss_spi_panel_event_handler;
+
+
+	init_completion(&ctrl_pdata->spi_panel_te);
+	mutex_init(&ctrl_pdata->spi_tx_mutex);
+
+	rc = devm_request_irq(&pdev->dev,
+		gpio_to_irq(ctrl_pdata->disp_te_gpio),
+		spi_panel_te_handler, IRQF_TRIGGER_RISING,
+		"TE_GPIO", ctrl_pdata);
+	if (rc) {
+		pr_err("TE request_irq failed.\n");
+		return rc;
+	}
+
+	pr_debug("%s: spi panel  initialized\n", __func__);
+	return 0;
+
+error_pan_node:
+	of_node_put(spi_pan_node);
+error_vreg:
+	mdss_spi_put_dt_vreg_data(&pdev->dev,
+			&ctrl_pdata->panel_power_data);
+error_no_mem:
+	devm_kfree(&pdev->dev, ctrl_pdata);
+	return rc;
+}
+
+
+static const struct of_device_id mdss_spi_panel_match[] = {
+	{ .compatible = "qcom,mdss-spi-display" },
+	{},
+};
+
+static struct platform_driver this_driver = {
+	.probe = mdss_spi_panel_probe,
+	.driver = {
+		.name = "spi_panel",
+		.owner  = THIS_MODULE,
+		.of_match_table = mdss_spi_panel_match,
+	},
+};
+
+static int __init mdss_spi_display_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&this_driver);
+	return ret;
+}
+module_init(mdss_spi_display_init);
+
+MODULE_DEVICE_TABLE(of, mdss_spi_panel_match);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.h b/drivers/video/fbdev/msm/mdss_spi_panel.h
new file mode 100644
index 0000000..80b7ea8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_spi_panel.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_SPI_PANEL_H__
+#define __MDSS_SPI_PANEL_H__
+
+#if defined(CONFIG_FB_MSM_MDSS_SPI_PANEL) && defined(CONFIG_SPI_QUP)
+#include <linux/list.h>
+#include <linux/mdss_io_util.h>
+#include <linux/irqreturn.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/gpio.h>
+
+#include "mdss_panel.h"
+#include "mdp3_dma.h"
+
+#define MDSS_MAX_BL_BRIGHTNESS 255
+
+#define MDSS_SPI_RST_SEQ_LEN	10
+
+#define NONE_PANEL "none"
+
+#define CTRL_STATE_UNKNOWN		0x00
+#define CTRL_STATE_PANEL_INIT		BIT(0)
+#define CTRL_STATE_MDP_ACTIVE		BIT(1)
+
+#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default"
+#define MDSS_PINCTRL_STATE_SLEEP  "mdss_sleep"
+#define SPI_PANEL_TE_TIMEOUT	400
+
+enum spi_panel_data_type {
+	panel_cmd,
+	panel_parameter,
+	panel_pixel,
+	UNKNOWN_FORMAT,
+};
+
+enum spi_panel_bl_ctrl {
+	SPI_BL_PWM,
+	SPI_BL_WLED,
+	SPI_BL_DCS_CMD,
+	SPI_UNKNOWN_CTRL,
+};
+
+struct spi_pinctrl_res {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+#define SPI_PANEL_DST_FORMAT_RGB565		0
+
+struct spi_ctrl_hdr {
+	char wait;	/* ms */
+	char dlen;	/* payload length: command byte plus parameters */
+};
+
+struct spi_cmd_desc {
+	struct spi_ctrl_hdr dchdr;
+	char *command;
+	char *parameter;
+};
+
+struct spi_panel_cmds {
+	char *buf;
+	int blen;
+	struct spi_cmd_desc *cmds;
+	int cmd_cnt;
+};
+
+enum spi_panel_status_mode {
+	SPI_ESD_REG,
+	SPI_SEND_PANEL_COMMAND,
+	SPI_ESD_MAX,
+};
+
+
+struct spi_panel_data {
+	struct mdss_panel_data panel_data;
+	struct mdss_util_intf *mdss_util;
+	struct spi_pinctrl_res pin_res;
+	struct mdss_module_power panel_power_data;
+	struct completion spi_panel_te;
+	struct mdp3_notification vsync_client;
+	unsigned int vsync_status;
+	int byte_pre_frame;	/* bytes per frame */
+	char *tx_buf;
+	u8 ctrl_state;
+	int disp_te_gpio;
+	int rst_gpio;
+	int disp_dc_gpio;	/* command or data */
+	struct spi_panel_cmds on_cmds;
+	struct spi_panel_cmds off_cmds;
+	bool (*check_status)(struct spi_panel_data *pdata);
+	int (*on)(struct mdss_panel_data *pdata);
+	int (*off)(struct mdss_panel_data *pdata);
+	struct mutex spi_tx_mutex;
+	struct pwm_device *pwm_bl;
+	int bklt_ctrl;	/* backlight ctrl */
+	bool pwm_pmi;
+	int pwm_period;
+	int pwm_pmic_gpio;
+	int pwm_lpg_chan;
+	int pwm_enabled;
+	int bklt_max;
+	int status_mode;
+	u32 status_cmds_rlen;
+	u8 panel_status_reg;
+	u8 *exp_status_value;
+	u8 *act_status_value;
+	unsigned char *return_buf;
+};
+
+int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata,
+				char *buf, int len, int stride);
+int is_spi_panel_continuous_splash_on(struct mdss_panel_data *pdata);
+void mdp3_spi_vsync_enable(struct mdss_panel_data *pdata,
+				struct mdp3_notification *vsync_client);
+void mdp3_check_spi_panel_status(struct work_struct *work,
+				uint32_t interval);
+
+#else
+static inline int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata,
+				char *buf, int len, int stride)
+{
+	return 0;
+}
+static inline int is_spi_panel_continuous_splash_on(
+				struct mdss_panel_data *pdata)
+{
+	return 0;
+}
+static inline void mdp3_spi_vsync_enable(struct mdss_panel_data *pdata,
+			struct mdp3_notification *vsync_client)
+{
+}
+
+#endif /* End of CONFIG_FB_MSM_MDSS_SPI_PANEL && CONFIG_SPI_QUP */
+
+#endif /* End of __MDSS_SPI_PANEL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_util.c b/drivers/video/fbdev/msm/mdss_util.c
index 30fcf28..a5fb8b6 100644
--- a/drivers/video/fbdev/msm/mdss_util.c
+++ b/drivers/video/fbdev/msm/mdss_util.c
@@ -23,6 +23,7 @@
 {
 	unsigned long irq_flags;
 	u32 ndx_bit;
+	bool err = false;
 
 	if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return -EINVAL;
@@ -33,10 +34,12 @@
 	if (!mdss_irq_handlers[hw->hw_ndx])
 		mdss_irq_handlers[hw->hw_ndx] = hw;
 	else
-		pr_err("panel %d's irq at %pK is already registered\n",
-			hw->hw_ndx, hw->irq_handler);
+		err = true;
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
 
+	if (err)
+		pr_err("panel %d's irq at %pK is already registered\n",
+			hw->hw_ndx, hw->irq_handler);
 	return 0;
 }
 
@@ -76,6 +79,7 @@
 {
 	unsigned long irq_flags;
 	u32 ndx_bit;
+	bool err = false;
 
 	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return;
@@ -87,7 +91,7 @@
 
 	spin_lock_irqsave(&mdss_lock, irq_flags);
 	if (!(hw->irq_info->irq_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+		err = true;
 	} else {
 		hw->irq_info->irq_mask &= ~ndx_bit;
 		if (hw->irq_info->irq_mask == 0) {
@@ -96,12 +100,16 @@
 		}
 	}
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+	if (err)
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
 }
 
 /* called from interrupt context */
 void mdss_disable_irq_nosync(struct mdss_hw *hw)
 {
 	u32 ndx_bit;
+	bool err = false;
 
 	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return;
@@ -113,7 +121,7 @@
 
 	spin_lock(&mdss_lock);
 	if (!(hw->irq_info->irq_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+		err = true;
 	} else {
 		hw->irq_info->irq_mask &= ~ndx_bit;
 		if (hw->irq_info->irq_mask == 0) {
@@ -122,6 +130,9 @@
 		}
 	}
 	spin_unlock(&mdss_lock);
+
+	if (err)
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
 }
 
 int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
@@ -176,6 +187,7 @@
 {
 	unsigned long irq_flags;
 	u32 ndx_bit;
+	bool err = false;
 
 	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
 		return;
@@ -188,7 +200,7 @@
 
 	spin_lock_irqsave(&mdss_lock, irq_flags);
 	if (!(hw->irq_info->irq_wake_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+		err = true;
 	} else {
 		hw->irq_info->irq_wake_mask &= ~ndx_bit;
 		if (hw->irq_info->irq_wake_ena) {
@@ -197,6 +209,9 @@
 		}
 	}
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+	if (err)
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
 }
 
 static bool check_display(char *param_string)
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 153b39f..9ccd428 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -417,7 +417,7 @@
 	wmb(); /* make sure phy timings are updated*/
 }
 
-static void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
 {
 	/* start phy sw reset */
 	MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0001);
@@ -519,8 +519,7 @@
 	 * is only done from the clock master. This will ensure that the PLL is
 	 * off when PHY reset is called.
 	 */
-	if (mdss_dsi_is_ctrl_clk_slave(ctrl) ||
-		(ctrl->shared_data->phy_rev == DSI_PHY_REV_12NM))
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl))
 		return;
 
 	mdss_dsi_phy_sw_reset_sub(ctrl);
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a350209..31c301d 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -121,7 +121,7 @@
 		unsigned char __user *ured;
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
-		int index, count, i;
+		unsigned int index, count, i;
 
 		if (get_user(index, &c->index) ||
 		    __get_user(count, &c->count) ||
@@ -160,7 +160,7 @@
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
 		struct fb_cmap *cmap = &info->cmap;
-		int index, count, i;
+		unsigned int index, count, i;
 		u8 red, green, blue;
 
 		if (get_user(index, &c->index) ||
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index e682bf0..88cd2a5 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -566,7 +566,8 @@
 				char c;
 				if (get_user(c, buf + i))
 					return -EFAULT;
-				expect_close = (c == 'V');
+				if (c == 'V')
+					expect_close = true;
 			}
 
 			/* Properly order writes across fork()ed processes */
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index ce0c38b..37523f1 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -50,6 +50,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -159,7 +160,7 @@
 	    !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
 		timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
 
-	timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
+	timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
 		    arch_counter_get_cntvct();
 
 	do_div(timeleft, gwdt->clk);
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 2b28c00..dfe20b8 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -54,7 +54,7 @@
 #define SB800_PM_WATCHDOG_CONFIG	0x4C
 
 #define SB800_PCI_WATCHDOG_DECODE_EN	(1 << 0)
-#define SB800_PM_WATCHDOG_DISABLE	(1 << 2)
+#define SB800_PM_WATCHDOG_DISABLE	(1 << 1)
 #define SB800_PM_WATCHDOG_SECOND_RES	(3 << 0)
 #define SB800_ACPI_MMIO_DECODE_EN	(1 << 0)
 #define SB800_ACPI_MMIO_SEL		(1 << 1)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index d5dbdb9..6d3b32c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -764,8 +764,8 @@
 	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	for (; i >= 0; i--)
-		__unbind_from_irq(irq + i);
+	while (nvec--)
+		__unbind_from_irq(irq + nvec);
 	mutex_unlock(&irq_mapping_update_lock);
 	return ret;
 }
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bb36b1e..775d419 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -327,7 +327,7 @@
 			if (entry->page) {
 				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 					 entry->ref, page_to_pfn(entry->page));
-				__free_page(entry->page);
+				put_page(entry->page);
 			} else
 				pr_info("freeing g.e. %#x\n", entry->ref);
 			kfree(entry);
@@ -383,7 +383,7 @@
 	if (gnttab_end_foreign_access_ref(ref, readonly)) {
 		put_free_entry(ref);
 		if (page != 0)
-			free_page(page);
+			put_page(virt_to_page(page));
 	} else
 		gnttab_add_deferred(ref, readonly,
 				    page ? virt_to_page(page) : NULL);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b68ced5..2fe7353 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -359,7 +359,7 @@
 	 * physical address */
 	phys = xen_bus_to_phys(dev_addr);
 
-	if (((dev_addr + size - 1 > dma_mask)) ||
+	if (((dev_addr + size - 1 <= dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
 		xen_destroy_contiguous_region(phys, order);
 
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4b85746..7ff9d25 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -362,9 +362,9 @@
 	}
 	/* There are more ACPI Processor objects than in x2APIC or MADT.
 	 * This can happen with incorrect ACPI SSDT declerations. */
-	if (acpi_id > nr_acpi_bits) {
-		pr_debug("We only have %u, trying to set %u\n",
-			 nr_acpi_bits, acpi_id);
+	if (acpi_id >= nr_acpi_bits) {
+		pr_debug("max acpi id %u, trying to set %u\n",
+			 nr_acpi_bits - 1, acpi_id);
 		return AE_OK;
 	}
 	/* OK, There is a ACPI Processor object */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cf..c2d4476 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -470,8 +470,11 @@
 
 	/* Register with generic device framework. */
 	err = device_register(&xendev->dev);
-	if (err)
+	if (err) {
+		put_device(&xendev->dev);
+		xendev = NULL;
 		goto fail;
+	}
 
 	return 0;
 fail:
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d295d98..8ec7938 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -16,6 +16,7 @@
 #include <linux/bitops.h>
 #include <linux/string.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 #include <linux/slab.h>
 
 #include <asm/byteorder.h>
@@ -185,6 +186,17 @@
 		z->dev.parent = &bus->dev;
 		z->dev.bus = &zorro_bus_type;
 		z->dev.id = i;
+		switch (z->rom.er_Type & ERT_TYPEMASK) {
+		case ERT_ZORROIII:
+			z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+			break;
+
+		case ERT_ZORROII:
+		default:
+			z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
+			break;
+		}
+		z->dev.dma_mask = &z->dev.coherent_dma_mask;
 	}
 
 	/* ... then register them */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 29186d2..2d4d495 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -224,9 +224,10 @@
 
 	affs_lock_dir(dir);
 	bh = affs_find_entry(dir, dentry);
-	affs_unlock_dir(dir);
-	if (IS_ERR(bh))
+	if (IS_ERR(bh)) {
+		affs_unlock_dir(dir);
 		return ERR_CAST(bh);
+	}
 	if (bh) {
 		u32 ino = bh->b_blocknr;
 
@@ -240,10 +241,13 @@
 		}
 		affs_brelse(bh);
 		inode = affs_iget(sb, ino);
-		if (IS_ERR(inode))
+		if (IS_ERR(inode)) {
+			affs_unlock_dir(dir);
 			return ERR_CAST(inode);
+		}
 	}
 	d_add(dentry, inode);
+	affs_unlock_dir(dir);
 	return NULL;
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 0606f03..42d8c09 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1074,8 +1074,8 @@
 
 	ctx = rcu_dereference(table->table[id]);
 	if (ctx && ctx->user_id == ctx_id) {
-		percpu_ref_get(&ctx->users);
-		ret = ctx;
+		if (percpu_ref_tryget_live(&ctx->users))
+			ret = ctx;
 	}
 out:
 	rcu_read_unlock();
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f6ba165..c94d339 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2486,10 +2486,8 @@
 	if (p->reada != READA_NONE)
 		reada_for_search(root, p, level, slot, key->objectid);
 
-	btrfs_release_path(p);
-
 	ret = -EAGAIN;
-	tmp = read_tree_block(root, blocknr, 0);
+	tmp = read_tree_block(root, blocknr, gen);
 	if (!IS_ERR(tmp)) {
 		/*
 		 * If the read above didn't mark this buffer up to date,
@@ -2503,6 +2501,8 @@
 	} else {
 		ret = PTR_ERR(tmp);
 	}
+
+	btrfs_release_path(p);
 	return ret;
 }
 
@@ -2760,6 +2760,8 @@
 		 * contention with the cow code
 		 */
 		if (cow) {
+			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
+
 			/*
 			 * if we don't really need to cow this block
 			 * then we don't want to set the path blocking,
@@ -2784,9 +2786,13 @@
 			}
 
 			btrfs_set_path_blocking(p);
-			err = btrfs_cow_block(trans, root, b,
-					      p->nodes[level + 1],
-					      p->slots[level + 1], &b);
+			if (last_level)
+				err = btrfs_cow_block(trans, root, b, NULL, 0,
+						      &b);
+			else
+				err = btrfs_cow_block(trans, root, b,
+						      p->nodes[level + 1],
+						      p->slots[level + 1], &b);
 			if (err) {
 				ret = err;
 				goto done;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c66054c..9557a31 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1281,7 +1281,7 @@
 	if (!writers)
 		return ERR_PTR(-ENOMEM);
 
-	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
+	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
 	if (ret < 0) {
 		kfree(writers);
 		return ERR_PTR(ret);
@@ -4142,9 +4142,11 @@
 		btrfs_err(fs_info, "no valid FS found");
 		ret = -EINVAL;
 	}
-	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
-		btrfs_warn(fs_info, "unrecognized super flag: %llu",
+	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
+		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+		ret = -EINVAL;
+	}
 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a29730c..44a4385 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4527,6 +4527,7 @@
 	if (wait_for_alloc) {
 		mutex_unlock(&fs_info->chunk_mutex);
 		wait_for_alloc = 0;
+		cond_resched();
 		goto again;
 	}
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c95ff09..4375448 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1912,10 +1912,19 @@
 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
 {
 	int ret;
+	struct blk_plug plug;
 
+	/*
+	 * This is only called in fsync, which would do synchronous writes, so
+	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
+	 * multiple disks using raid profile, a large IO can be split to
+	 * several segments of stripe length (currently 64K).
+	 */
+	blk_start_plug(&plug);
 	atomic_inc(&BTRFS_I(inode)->sync_writers);
 	ret = btrfs_fdatawrite_range(inode, start, end);
 	atomic_dec(&BTRFS_I(inode)->sync_writers);
+	blk_finish_plug(&plug);
 
 	return ret;
 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ffd5831..f073de6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6491,8 +6491,7 @@
 		goto out_unlock_inode;
 	} else {
 		btrfs_update_inode(trans, root, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 	}
 
 out_unlock:
@@ -6567,8 +6566,7 @@
 		goto out_unlock_inode;
 
 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 out_unlock:
 	btrfs_end_transaction(trans, root);
@@ -6711,12 +6709,7 @@
 	if (err)
 		goto out_fail_inode;
 
-	d_instantiate(dentry, inode);
-	/*
-	 * mkdir is special.  We're unlocking after we call d_instantiate
-	 * to avoid a race with nfsd calling d_instantiate.
-	 */
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	drop_on_err = 0;
 
 out_fail:
@@ -10354,8 +10347,7 @@
 		goto out_unlock_inode;
 	}
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 out_unlock:
 	btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d016d4a..af6a776 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2161,11 +2161,21 @@
 	}
 
 	/*
-	 * reconstruct from the q stripe if they are
-	 * asking for mirror 3
+	 * Loop retry:
+	 * for 'mirror == 2', reconstruct from all other stripes.
+	 * for 'mirror_num > 2', select a stripe to fail on every retry.
 	 */
-	if (mirror_num == 3)
-		rbio->failb = rbio->real_stripes - 2;
+	if (mirror_num > 2) {
+		/*
+		 * 'mirror == 3' is to fail the p stripe and
+		 * reconstruct from the q stripe.  'mirror > 3' is to
+		 * fail a data stripe and reconstruct from p+q stripe.
+		 */
+		rbio->failb = rbio->real_stripes - (mirror_num - 1);
+		ASSERT(rbio->failb > 0);
+		if (rbio->failb <= rbio->faila)
+			rbio->failb--;
+	}
 
 	ret = lock_stripe_add(rbio);
 
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d040afc..c8d2eec 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4822,6 +4822,9 @@
 	u64 len;
 	int ret = 0;
 
+	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
+		return send_update_extent(sctx, offset, end - offset);
+
 	p = fs_path_alloc();
 	if (!p)
 		return -ENOMEM;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ca7cb5e..9c66666 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -63,7 +63,7 @@
 	btrfs_set_extent_generation(leaf, item, 1);
 	btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
 	block_info = (struct btrfs_tree_block_info *)(item + 1);
-	btrfs_set_tree_block_level(leaf, block_info, 1);
+	btrfs_set_tree_block_level(leaf, block_info, 0);
 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
 	if (parent > 0) {
 		btrfs_set_extent_inline_ref_type(leaf, iref,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5240173..44d3492 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2241,8 +2241,10 @@
 			nritems = btrfs_header_nritems(path->nodes[0]);
 			if (path->slots[0] >= nritems) {
 				ret = btrfs_next_leaf(root, path);
-				if (ret)
+				if (ret == 1)
 					break;
+				else if (ret < 0)
+					goto out;
 			}
 			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 					      path->slots[0]);
@@ -3397,8 +3399,11 @@
 		 * from this directory and from this transaction
 		 */
 		ret = btrfs_next_leaf(root, path);
-		if (ret == 1) {
-			last_offset = (u64)-1;
+		if (ret) {
+			if (ret == 1)
+				last_offset = (u64)-1;
+			else
+				err = ret;
 			goto done;
 		}
 		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -3849,6 +3854,7 @@
 			ASSERT(ret == 0);
 			src = src_path->nodes[0];
 			i = 0;
+			need_find_last_extent = true;
 		}
 
 		btrfs_item_key_to_cpu(src, &key, i);
@@ -4614,6 +4620,7 @@
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	u64 logged_isize = 0;
 	bool need_log_inode_item = true;
+	bool xattrs_logged = false;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -4918,6 +4925,7 @@
 	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
 	if (err)
 		goto out_unlock;
+	xattrs_logged = true;
 	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
 		btrfs_release_path(path);
 		btrfs_release_path(dst_path);
@@ -4930,6 +4938,11 @@
 	btrfs_release_path(dst_path);
 	if (need_log_inode_item) {
 		err = log_inode_item(trans, log, dst_path, inode);
+		if (!err && !xattrs_logged) {
+			err = btrfs_log_all_xattrs(trans, root, inode, path,
+						   dst_path);
+			btrfs_release_path(path);
+		}
 		if (err)
 			goto out_unlock;
 	}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 39b917b7..64ab90d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3966,6 +3966,15 @@
 		return 0;
 	}
 
+	/*
+	 * A ro->rw remount sequence should continue with the paused balance
+	 * regardless of who pauses it, system or the user as of now, so set
+	 * the resume flag.
+	 */
+	spin_lock(&fs_info->balance_lock);
+	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
+	spin_unlock(&fs_info->balance_lock);
+
 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
 	return PTR_ERR_OR_ZERO(tsk);
 }
@@ -5177,7 +5186,14 @@
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
 		ret = 2;
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
-		ret = 3;
+		/*
+		 * There could be two corrupted data stripes, we need
+		 * to loop retry in order to rebuild the correct data.
+		 *
+		 * Fail a stripe at a time on every retry except the
+		 * stripe under reconstruction.
+		 */
+		ret = map->num_stripes;
 	else
 		ret = 1;
 	free_extent_map(em);
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d8f496..3a8064d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1455,12 +1455,48 @@
 	return 0;
 }
 
+static void __evict_bh_lru(void *arg)
+{
+	struct bh_lru *b = &get_cpu_var(bh_lrus);
+	struct buffer_head *bh = arg;
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		if (b->bhs[i] == bh) {
+			brelse(b->bhs[i]);
+			b->bhs[i] = NULL;
+			goto out;
+		}
+	}
+out:
+	put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+	struct buffer_head *bh = arg;
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		if (b->bhs[i] == bh)
+			return 1;
+	}
+
+	return 0;
+
+}
 void invalidate_bh_lrus(void)
 {
 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
+static void evict_bh_lrus(struct buffer_head *bh)
+{
+	on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
@@ -3250,8 +3286,15 @@
 	do {
 		if (buffer_write_io_error(bh) && page->mapping)
 			mapping_set_error(page->mapping, -EIO);
-		if (buffer_busy(bh))
-			goto failed;
+		if (buffer_busy(bh)) {
+			/*
+			 * Check if the busy failure was due to an
+			 * outstanding LRU reference
+			 */
+			evict_bh_lrus(bh);
+			if (buffer_busy(bh))
+				goto failed;
+		}
 		bh = bh->b_this_page;
 	} while (bh != head);
 
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index b382e59..2a89030 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -816,7 +816,6 @@
 	int err;
 	unsigned long started = jiffies;  /* note the start time */
 	struct dentry *root;
-	int first = 0;   /* first vfsmount for this super_block */
 
 	dout("mount start %p\n", fsc);
 	mutex_lock(&fsc->client->mount_mutex);
@@ -834,17 +833,17 @@
 			path = fsc->mount_options->server_path + 1;
 			dout("mount opening path %s\n", path);
 		}
+
+		err = ceph_fs_debugfs_init(fsc);
+		if (err < 0)
+			goto out;
+
 		root = open_root_dentry(fsc, path, started);
 		if (IS_ERR(root)) {
 			err = PTR_ERR(root);
 			goto out;
 		}
 		fsc->sb->s_root = dget(root);
-		first = 1;
-
-		err = ceph_fs_debugfs_init(fsc);
-		if (err < 0)
-			goto fail;
 	} else {
 		root = dget(fsc->sb->s_root);
 	}
@@ -854,11 +853,6 @@
 	mutex_unlock(&fsc->client->mount_mutex);
 	return root;
 
-fail:
-	if (first) {
-		dput(fsc->sb->s_root);
-		fsc->sb->s_root = NULL;
-	}
 out:
 	mutex_unlock(&fsc->client->mount_mutex);
 	return ERR_PTR(err);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index cc420d6..d572228 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -6413,9 +6413,7 @@
 	pSMB->InformationLevel =
 		cpu_to_le16(SMB_SET_FILE_EA);
 
-	parm_data =
-		(struct fealist *) (((char *) &pSMB->hdr.Protocol) +
-				       offset);
+	parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
 	pSMB->ParameterOffset = cpu_to_le16(param_offset);
 	pSMB->DataOffset = cpu_to_le16(offset);
 	pSMB->SetupCount = 1;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d9cbda2..331ddd0 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@
 		goto mknod_out;
 	}
 
+	if (!S_ISCHR(mode) && !S_ISBLK(mode))
+		goto mknod_out;
+
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
 		goto mknod_out;
 
@@ -681,10 +684,8 @@
 
 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
 	if (buf == NULL) {
-		kfree(full_path);
 		rc = -ENOMEM;
-		free_xid(xid);
-		return rc;
+		goto mknod_out;
 	}
 
 	if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@
 		pdev->minor = cpu_to_le64(MINOR(device_number));
 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
 							&bytes_written, iov, 1);
-	} /* else if (S_ISFIFO) */
+	}
 	tcon->ses->server->ops->close(xid, tcon, &fid);
 	d_drop(direntry);
 
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index cb49698..cc42e5e 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,7 @@
 obj-$(CONFIG_FS_ENCRYPTION)	+= fscrypto.o
 
-fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/f2fs
+
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypt_ice.o
 fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index d7b4c48..c629e97 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -32,14 +32,18 @@
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
 
-		if (ret) {
-			WARN_ON_ONCE(1);
-			SetPageError(page);
-		} else if (done) {
+		if (fscrypt_using_hardware_encryption(page->mapping->host)) {
 			SetPageUptodate(page);
+		} else {
+			int ret = fscrypt_decrypt_page(page->mapping->host,
+				page, PAGE_SIZE, 0, page->index);
+			if (ret) {
+				WARN_ON_ONCE(1);
+				SetPageError(page);
+			} else if (done) {
+				SetPageUptodate(page);
+			}
 		}
 		if (done)
 			unlock_page(page);
diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c
new file mode 100644
index 0000000..62dae83
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "fscrypt_ice.h"
+
+int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	return S_ISREG(inode->i_mode) && ci &&
+		ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	if (!ci)
+		return 0;
+
+	return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+					const struct inode *inode2)
+{
+	char *key1 = NULL;
+	char *key2 = NULL;
+	char *salt1 = NULL;
+	char *salt2 = NULL;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	if (inode1 == inode2)
+		return true;
+
+	/* both do not belong to ice, so we don't care, they are equal
+	 * for us
+	 */
+	if (!fscrypt_should_be_processed_by_ice(inode1) &&
+			!fscrypt_should_be_processed_by_ice(inode2))
+		return true;
+
+	/* one belongs to ice, the other does not -> not equal */
+	if (fscrypt_should_be_processed_by_ice(inode1) ^
+			fscrypt_should_be_processed_by_ice(inode2))
+		return false;
+
+	key1 = fscrypt_get_ice_encryption_key(inode1);
+	key2 = fscrypt_get_ice_encryption_key(inode2);
+	salt1 = fscrypt_get_ice_encryption_salt(inode1);
+	salt2 = fscrypt_get_ice_encryption_salt(inode2);
+
+	/* key and salt should not be null by this point */
+	if (!key1 || !key2 || !salt1 || !salt2 ||
+		(fscrypt_get_ice_encryption_key_size(inode1) !=
+		 fscrypt_get_ice_encryption_key_size(inode2)) ||
+		(fscrypt_get_ice_encryption_salt_size(inode1) !=
+		 fscrypt_get_ice_encryption_salt_size(inode2)))
+		return false;
+
+	if ((memcmp(key1, key2,
+			fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
+		(memcmp(salt1, salt2,
+			fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
+		return true;
+
+	return false;
+}
+
+void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
+{
+	if (fscrypt_should_be_processed_by_ice(inode))
+		bio->bi_iter.bi_dun = dun;
+}
+EXPORT_SYMBOL(fscrypt_set_ice_dun);
+
+/*
+ * This function will be used for filesystem when deciding to merge bios.
+ * Basic assumption is, if inline_encryption is set, single bio has to
+ * guarantee consecutive LBAs as well as ino|pg->index.
+ */
+bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
+{
+	if (!bio)
+		return true;
+
+	/* if both of them are not encrypted, no further check is needed */
+	if (!bio_dun(bio) && !bio_encrypted)
+		return true;
+
+	/* ICE allows only consecutive iv_key stream. */
+	return bio_end_dun(bio) == dun;
+}
+EXPORT_SYMBOL(fscrypt_mergeable_bio);
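
The helpers above are exported for filesystems to drive inline-crypto (ICE) bio construction. As a purely illustrative sketch (not part of the patch), this is how a hypothetical read path could use fscrypt_mergeable_bio() and fscrypt_set_ice_dun() when batching pages into a bio; the DUN layout, the myfs_* names and the error handling are assumptions for the example, not taken from the actual f2fs/ext4 call sites.

/*
 * Illustrative fragment only; assumes <linux/bio.h>, <linux/err.h> and the
 * fscrypt ICE helpers declared above.  The ino<<32|index DUN layout is an
 * assumption for this sketch, not something mandated by the patch.
 */
static struct bio *myfs_add_page_to_bio(struct bio *bio, struct inode *inode,
					struct page *page, sector_t sector)
{
	u64 dun = ((u64)inode->i_ino << 32) | page->index;
	bool encrypted = fscrypt_using_hardware_encryption(inode);

	/* An existing bio may only grow while the DUN stays contiguous. */
	if (bio && !fscrypt_mergeable_bio(bio, dun, encrypted)) {
		submit_bio(bio);
		bio = NULL;
	}
	if (!bio) {
		bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
		if (!bio)
			return ERR_PTR(-ENOMEM);
		bio->bi_iter.bi_sector = sector;
		/* Tag the bio so the block layer programs ICE with this DUN. */
		fscrypt_set_ice_dun(inode, bio, dun);
	}
	bio_add_page(bio, page, PAGE_SIZE, 0);
	return bio;
}
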
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
new file mode 100644
index 0000000..c540506
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _FSCRYPT_ICE_H
+#define _FSCRYPT_ICE_H
+
+#include <linux/blkdev.h>
+#include "fscrypt_private.h"
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+	if (!inode->i_sb->s_cop)
+		return 0;
+	if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
+		return 0;
+
+	return fscrypt_using_hardware_encryption(inode);
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+	return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
+}
+
+int fscrypt_is_aes_xts_cipher(const struct inode *inode);
+
+char *fscrypt_get_ice_encryption_key(const struct inode *inode);
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
+
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+					const struct inode *inode2);
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+					const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+					const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+#else
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+	return 0;
+}
+
+static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+					const struct inode *inode)
+{
+	return 0;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+					const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline bool fscrypt_is_ice_encryption_info_equal(
+					const struct inode *inode1,
+					const struct inode *inode2)
+{
+	return 0;
+}
+
+static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+#endif
+
+#endif	/* _FSCRYPT_ICE_H */
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index d36a648..03ff3aa 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -11,9 +11,12 @@
 #ifndef _FSCRYPT_PRIVATE_H
 #define _FSCRYPT_PRIVATE_H
 
+#ifndef __FS_HAS_ENCRYPTION
 #define __FS_HAS_ENCRYPTION 1
+#endif
 #include <linux/fscrypt.h>
 #include <crypto/hash.h>
+#include <linux/pfk.h>
 
 /* Encryption parameters */
 #define FS_IV_SIZE			16
@@ -58,17 +61,25 @@
 	char encrypted_path[1];
 } __packed;
 
+enum ci_mode_info {
+	CI_NONE_MODE = 0,
+	CI_DATA_MODE,
+	CI_FNAME_MODE,
+};
+
 /*
  * A pointer to this structure is stored in the file system's in-core
  * representation of an inode.
  */
 struct fscrypt_info {
+	u8 ci_mode;
 	u8 ci_data_mode;
 	u8 ci_filename_mode;
 	u8 ci_flags;
 	struct crypto_skcipher *ci_ctfm;
 	struct crypto_cipher *ci_essiv_tfm;
 	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+	u8 ci_raw_key[FS_MAX_KEY_SIZE];
 };
 
 typedef enum {
@@ -90,9 +101,22 @@
 	    filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
 		return true;
 
+	if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
+	    filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
+		return true;
+
+	if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE)
+		return true;
+
 	return false;
 }
 
+static inline bool is_private_data_mode(struct fscrypt_info *ci)
+{
+	return ci->ci_mode == CI_DATA_MODE &&
+		ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+
 /* crypto.c */
 extern struct kmem_cache *fscrypt_info_cachep;
 extern int fscrypt_initialize(unsigned int cop_flags);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index aae68c0..737b6fc 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -15,6 +15,7 @@
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
 #include "fscrypt_private.h"
+#include "fscrypt_ice.h"
 
 static struct crypto_shash *essiv_hash_tfm;
 
@@ -67,7 +68,7 @@
 }
 
 static int validate_user_key(struct fscrypt_info *crypt_info,
-			struct fscrypt_context *ctx, u8 *raw_key,
+			struct fscrypt_context *ctx,
 			const char *prefix, int min_keysize)
 {
 	char *description;
@@ -115,7 +116,24 @@
 		res = -ENOKEY;
 		goto out;
 	}
-	res = derive_key_aes(ctx->nonce, master_key, raw_key);
+	/* If we don't need to derive, we still want to do everything
+	 * up until now to validate the key. It's cleaner to fail now
+	 * than to fail in block I/O.
+	 */
+	if (!is_private_data_mode(crypt_info)) {
+		res = derive_key_aes(ctx->nonce, master_key,
+				crypt_info->ci_raw_key);
+	} else {
+		/*
+		 * Inline encryption: no key derivation required because IVs
+		 * are assigned based on iv_sector.
+		 */
+		BUILD_BUG_ON(sizeof(crypt_info->ci_raw_key) !=
+				sizeof(master_key->raw));
+		memcpy(crypt_info->ci_raw_key,
+			master_key->raw, sizeof(crypt_info->ci_raw_key));
+		res = 0;
+	}
 out:
 	up_read(&keyring_key->sem);
 	key_put(keyring_key);
@@ -134,10 +152,14 @@
 					     FS_AES_128_CBC_KEY_SIZE },
 	[FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))",
 					     FS_AES_128_CTS_KEY_SIZE },
+	[FS_ENCRYPTION_MODE_SPECK128_256_XTS] = { "xts(speck128)",	64 },
+	[FS_ENCRYPTION_MODE_SPECK128_256_CTS] = { "cts(cbc(speck128))",	32 },
+	[FS_ENCRYPTION_MODE_PRIVATE]	 = { "bugon",
+					     FS_AES_256_XTS_KEY_SIZE },
 };
 
 static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
-				 const char **cipher_str_ret, int *keysize_ret)
+		const char **cipher_str_ret, int *keysize_ret, int *fname)
 {
 	u32 mode;
 
@@ -149,9 +171,12 @@
 	}
 
 	if (S_ISREG(inode->i_mode)) {
+		ci->ci_mode = CI_DATA_MODE;
 		mode = ci->ci_data_mode;
 	} else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+		ci->ci_mode = CI_FNAME_MODE;
 		mode = ci->ci_filename_mode;
+		*fname = 1;
 	} else {
 		WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
 			  inode->i_ino, (inode->i_mode & S_IFMT));
@@ -170,6 +195,7 @@
 
 	crypto_free_skcipher(ci->ci_ctfm);
 	crypto_free_cipher(ci->ci_essiv_tfm);
+	memset(ci, 0, sizeof(*ci)); /* sanitizes ->ci_raw_key */
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
@@ -239,6 +265,12 @@
 	crypto_free_shash(essiv_hash_tfm);
 }
 
+static int fscrypt_data_encryption_mode(struct inode *inode)
+{
+	return fscrypt_should_be_processed_by_ice(inode) ?
+	FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
 int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
@@ -246,8 +278,8 @@
 	struct crypto_skcipher *ctfm;
 	const char *cipher_str;
 	int keysize;
-	u8 *raw_key = NULL;
 	int res;
+	int fname = 0;
 
 	if (inode->i_crypt_info)
 		return 0;
@@ -264,7 +296,8 @@
 		/* Fake up a context for an unencrypted directory */
 		memset(&ctx, 0, sizeof(ctx));
 		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
-		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+		ctx.contents_encryption_mode =
+			fscrypt_data_encryption_mode(inode);
 		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
 		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
 	} else if (res != sizeof(ctx)) {
@@ -289,7 +322,8 @@
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
-	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
+				&fname);
 	if (res)
 		goto out;
 
@@ -298,14 +332,11 @@
 	 * crypto API as part of key derivation.
 	 */
 	res = -ENOMEM;
-	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
-	if (!raw_key)
-		goto out;
 
-	res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
+	res = validate_user_key(crypt_info, &ctx, FS_KEY_DESC_PREFIX,
 				keysize);
 	if (res && inode->i_sb->s_cop->key_prefix) {
-		int res2 = validate_user_key(crypt_info, &ctx, raw_key,
+		int res2 = validate_user_key(crypt_info, &ctx,
 					     inode->i_sb->s_cop->key_prefix,
 					     keysize);
 		if (res2) {
@@ -313,9 +344,23 @@
 				res = -ENOKEY;
 			goto out;
 		}
+		res = 0;
 	} else if (res) {
 		goto out;
 	}
+
+	if (is_private_data_mode(crypt_info)) {
+		if (!fscrypt_is_ice_capable(inode->i_sb)) {
+			pr_warn("%s: ICE support not available\n",
+					__func__);
+			res = -EINVAL;
+			goto out;
+		}
+		/* Let's encrypt/decrypt by ICE */
+		goto do_ice;
+	}
+
+
 	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
 	if (!ctfm || IS_ERR(ctfm)) {
 		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
@@ -330,26 +375,29 @@
 	 * if the provided key is longer than keysize, we use the first
 	 * keysize bytes of the derived key only
 	 */
-	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
+	res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key, keysize);
 	if (res)
 		goto out;
 
 	if (S_ISREG(inode->i_mode) &&
 	    crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
-		res = init_essiv_generator(crypt_info, raw_key, keysize);
+		res = init_essiv_generator(crypt_info, crypt_info->ci_raw_key,
+						keysize);
 		if (res) {
 			pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
 				 __func__, res, inode->i_ino);
 			goto out;
 		}
 	}
+	memzero_explicit(crypt_info->ci_raw_key,
+		sizeof(crypt_info->ci_raw_key));
+do_ice:
 	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
 		crypt_info = NULL;
 out:
 	if (res == -ENOKEY)
 		res = 0;
 	put_crypt_info(crypt_info);
-	kzfree(raw_key);
 	return res;
 }
 EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/dcache.c b/fs/dcache.c
index 885f74e..5ff9b41 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1859,6 +1859,28 @@
 }
 EXPORT_SYMBOL(d_instantiate);
 
+/*
+ * This should be equivalent to d_instantiate() + unlock_new_inode(),
+ * with lockdep-related part of unlock_new_inode() done before
+ * anything else.  Use that instead of open-coding d_instantiate()/
+ * unlock_new_inode() combinations.
+ */
+void d_instantiate_new(struct dentry *entry, struct inode *inode)
+{
+	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
+	BUG_ON(!inode);
+	lockdep_annotate_inode_mutex_key(inode);
+	security_d_instantiate(entry, inode);
+	spin_lock(&inode->i_lock);
+	__d_instantiate(entry, inode);
+	WARN_ON(!(inode->i_state & I_NEW));
+	inode->i_state &= ~I_NEW;
+	smp_mb();
+	wake_up_bit(&inode->i_state, __I_NEW);
+	spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(d_instantiate_new);
+
 /**
  * d_instantiate_no_diralias - instantiate a non-aliased dentry
  * @entry: dentry to complete
@@ -2452,7 +2474,7 @@
 
 retry:
 	rcu_read_lock();
-	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
+	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
@@ -2473,8 +2495,14 @@
 		rcu_read_unlock();
 		goto retry;
 	}
+
+	if (unlikely(seq & 1)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
 	hlist_bl_lock(b);
-	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
+	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
 		hlist_bl_unlock(b);
 		rcu_read_unlock();
 		goto retry;
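
d_instantiate_new() is consumed later in this series by btrfs, ecryptfs, ext2 and ext4. As a hedged sketch of the caller pattern (hypothetical myfs_* names, not from the patch), a typical ->create() tail changes like this:

static int myfs_create(struct inode *dir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	/* Assumed helper: returns a locked, I_NEW inode or an ERR_PTR. */
	struct inode *inode = myfs_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/*
	 * Replaces the open-coded pair
	 *	unlock_new_inode(inode);
	 *	d_instantiate(dentry, inode);
	 * so the dentry is hashed before I_NEW is cleared.
	 */
	d_instantiate_new(dentry, inode);
	return 0;
}
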
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 354e2ab..0536072 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -22,7 +22,6 @@
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <linux/device.h>
-#include <linux/srcu.h>
 #include <asm/poll.h>
 
 #include "internal.h"
@@ -48,66 +47,108 @@
 	.llseek =	noop_llseek,
 };
 
+#define F_DENTRY(filp) ((filp)->f_path.dentry)
+
+const struct file_operations *debugfs_real_fops(const struct file *filp)
+{
+	struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
+
+	if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
+		/*
+		 * Urgh, we've been called w/o a protecting
+		 * debugfs_file_get().
+		 */
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return fsd->real_fops;
+}
+EXPORT_SYMBOL_GPL(debugfs_real_fops);
+
 /**
- * debugfs_use_file_start - mark the beginning of file data access
+ * debugfs_file_get - mark the beginning of file data access
  * @dentry: the dentry object whose data is being accessed.
- * @srcu_idx: a pointer to some memory to store a SRCU index in.
  *
- * Up to a matching call to debugfs_use_file_finish(), any
- * successive call into the file removing functions debugfs_remove()
- * and debugfs_remove_recursive() will block. Since associated private
+ * Up to a matching call to debugfs_file_put(), any successive call
+ * into the file removing functions debugfs_remove() and
+ * debugfs_remove_recursive() will block. Since associated private
  * file data may only get freed after a successful return of any of
  * the removal functions, you may safely access it after a successful
- * call to debugfs_use_file_start() without worrying about
- * lifetime issues.
+ * call to debugfs_file_get() without worrying about lifetime issues.
  *
  * If -%EIO is returned, the file has already been removed and thus,
  * it is not safe to access any of its data. If, on the other hand,
  * it is allowed to access the file data, zero is returned.
- *
- * Regardless of the return code, any call to
- * debugfs_use_file_start() must be followed by a matching call
- * to debugfs_use_file_finish().
  */
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
-	__acquires(&debugfs_srcu)
+int debugfs_file_get(struct dentry *dentry)
 {
-	*srcu_idx = srcu_read_lock(&debugfs_srcu);
-	barrier();
+	struct debugfs_fsdata *fsd;
+	void *d_fsd;
+
+	d_fsd = READ_ONCE(dentry->d_fsdata);
+	if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+		fsd = d_fsd;
+	} else {
+		fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
+		if (!fsd)
+			return -ENOMEM;
+
+		fsd->real_fops = (void *)((unsigned long)d_fsd &
+					~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+		refcount_set(&fsd->active_users, 1);
+		init_completion(&fsd->active_users_drained);
+		if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+			kfree(fsd);
+			fsd = READ_ONCE(dentry->d_fsdata);
+		}
+	}
+
+	/*
+	 * In case of a successful cmpxchg() above, this check is
+	 * strictly necessary and must follow it, see the comment in
+	 * __debugfs_remove_file().
+	 * OTOH, if the cmpxchg() hasn't been executed or wasn't
+	 * successful, this serves the purpose of not starving
+	 * removers.
+	 */
 	if (d_unlinked(dentry))
 		return -EIO;
+
+	if (!refcount_inc_not_zero(&fsd->active_users))
+		return -EIO;
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(debugfs_use_file_start);
+EXPORT_SYMBOL_GPL(debugfs_file_get);
 
 /**
- * debugfs_use_file_finish - mark the end of file data access
- * @srcu_idx: the SRCU index "created" by a former call to
- *            debugfs_use_file_start().
+ * debugfs_file_put - mark the end of file data access
+ * @dentry: the dentry object formerly passed to
+ *          debugfs_file_get().
  *
  * Allow any ongoing concurrent call into debugfs_remove() or
  * debugfs_remove_recursive() blocked by a former call to
- * debugfs_use_file_start() to proceed and return to its caller.
+ * debugfs_file_get() to proceed and return to its caller.
  */
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu)
+void debugfs_file_put(struct dentry *dentry)
 {
-	srcu_read_unlock(&debugfs_srcu, srcu_idx);
-}
-EXPORT_SYMBOL_GPL(debugfs_use_file_finish);
+	struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata);
 
-#define F_DENTRY(filp) ((filp)->f_path.dentry)
+	if (refcount_dec_and_test(&fsd->active_users))
+		complete(&fsd->active_users_drained);
+}
+EXPORT_SYMBOL_GPL(debugfs_file_put);
 
 static int open_proxy_open(struct inode *inode, struct file *filp)
 {
-	const struct dentry *dentry = F_DENTRY(filp);
+	struct dentry *dentry = F_DENTRY(filp);
 	const struct file_operations *real_fops = NULL;
-	int srcu_idx, r;
+	int r;
 
-	r = debugfs_use_file_start(dentry, &srcu_idx);
-	if (r) {
-		r = -ENOENT;
-		goto out;
-	}
+	r = debugfs_file_get(dentry);
+	if (r)
+		return r == -EIO ? -ENOENT : r;
 
 	real_fops = debugfs_real_fops(filp);
 	real_fops = fops_get(real_fops);
@@ -124,7 +165,7 @@
 		r = real_fops->open(inode, filp);
 
 out:
-	debugfs_use_file_finish(srcu_idx);
+	debugfs_file_put(dentry);
 	return r;
 }
 
@@ -138,16 +179,16 @@
 #define FULL_PROXY_FUNC(name, ret_type, filp, proto, args)		\
 static ret_type full_proxy_ ## name(proto)				\
 {									\
-	const struct dentry *dentry = F_DENTRY(filp);			\
-	const struct file_operations *real_fops =			\
-		debugfs_real_fops(filp);				\
-	int srcu_idx;							\
+	struct dentry *dentry = F_DENTRY(filp);			\
+	const struct file_operations *real_fops;			\
 	ret_type r;							\
 									\
-	r = debugfs_use_file_start(dentry, &srcu_idx);			\
-	if (likely(!r))						\
-		r = real_fops->name(args);				\
-	debugfs_use_file_finish(srcu_idx);				\
+	r = debugfs_file_get(dentry);					\
+	if (unlikely(r))						\
+		return r;						\
+	real_fops = debugfs_real_fops(filp);				\
+	r = real_fops->name(args);					\
+	debugfs_file_put(dentry);					\
 	return r;							\
 }
 
@@ -172,18 +213,16 @@
 static unsigned int full_proxy_poll(struct file *filp,
 				struct poll_table_struct *wait)
 {
-	const struct dentry *dentry = F_DENTRY(filp);
-	const struct file_operations *real_fops = debugfs_real_fops(filp);
-	int srcu_idx;
+	struct dentry *dentry = F_DENTRY(filp);
 	unsigned int r = 0;
+	const struct file_operations *real_fops;
 
-	if (debugfs_use_file_start(dentry, &srcu_idx)) {
-		debugfs_use_file_finish(srcu_idx);
+	if (debugfs_file_get(dentry))
 		return POLLHUP;
-	}
 
+	real_fops = debugfs_real_fops(filp);
 	r = real_fops->poll(filp, wait);
-	debugfs_use_file_finish(srcu_idx);
+	debugfs_file_put(dentry);
 	return r;
 }
 
@@ -227,16 +266,14 @@
 
 static int full_proxy_open(struct inode *inode, struct file *filp)
 {
-	const struct dentry *dentry = F_DENTRY(filp);
+	struct dentry *dentry = F_DENTRY(filp);
 	const struct file_operations *real_fops = NULL;
 	struct file_operations *proxy_fops = NULL;
-	int srcu_idx, r;
+	int r;
 
-	r = debugfs_use_file_start(dentry, &srcu_idx);
-	if (r) {
-		r = -ENOENT;
-		goto out;
-	}
+	r = debugfs_file_get(dentry);
+	if (r)
+		return r == -EIO ? -ENOENT : r;
 
 	real_fops = debugfs_real_fops(filp);
 	real_fops = fops_get(real_fops);
@@ -274,7 +311,7 @@
 	kfree(proxy_fops);
 	fops_put(real_fops);
 out:
-	debugfs_use_file_finish(srcu_idx);
+	debugfs_file_put(dentry);
 	return r;
 }
 
@@ -285,13 +322,14 @@
 ssize_t debugfs_attr_read(struct file *file, char __user *buf,
 			size_t len, loff_t *ppos)
 {
+	struct dentry *dentry = F_DENTRY(file);
 	ssize_t ret;
-	int srcu_idx;
 
-	ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!ret))
-		ret = simple_attr_read(file, buf, len, ppos);
-	debugfs_use_file_finish(srcu_idx);
+	ret = debugfs_file_get(dentry);
+	if (unlikely(ret))
+		return ret;
+	ret = simple_attr_read(file, buf, len, ppos);
+	debugfs_file_put(dentry);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(debugfs_attr_read);
@@ -299,13 +337,14 @@
 ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
 			 size_t len, loff_t *ppos)
 {
+	struct dentry *dentry = F_DENTRY(file);
 	ssize_t ret;
-	int srcu_idx;
 
-	ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!ret))
-		ret = simple_attr_write(file, buf, len, ppos);
-	debugfs_use_file_finish(srcu_idx);
+	ret = debugfs_file_get(dentry);
+	if (unlikely(ret))
+		return ret;
+	ret = simple_attr_write(file, buf, len, ppos);
+	debugfs_file_put(dentry);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(debugfs_attr_write);
@@ -739,14 +778,14 @@
 {
 	char buf[3];
 	bool val;
-	int r, srcu_idx;
+	int r;
+	struct dentry *dentry = F_DENTRY(file);
 
-	r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!r))
-		val = *(bool *)file->private_data;
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	r = debugfs_file_get(dentry);
+	if (unlikely(r))
 		return r;
+	val = *(bool *)file->private_data;
+	debugfs_file_put(dentry);
 
 	if (val)
 		buf[0] = 'Y';
@@ -764,8 +803,9 @@
 	char buf[32];
 	size_t buf_size;
 	bool bv;
-	int r, srcu_idx;
+	int r;
 	bool *val = file->private_data;
+	struct dentry *dentry = F_DENTRY(file);
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
@@ -773,12 +813,11 @@
 
 	buf[buf_size] = '\0';
 	if (strtobool(buf, &bv) == 0) {
-		r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-		if (likely(!r))
-			*val = bv;
-		debugfs_use_file_finish(srcu_idx);
-		if (r)
+		r = debugfs_file_get(dentry);
+		if (unlikely(r))
 			return r;
+		*val = bv;
+		debugfs_file_put(dentry);
 	}
 
 	return count;
@@ -840,14 +879,15 @@
 			      size_t count, loff_t *ppos)
 {
 	struct debugfs_blob_wrapper *blob = file->private_data;
+	struct dentry *dentry = F_DENTRY(file);
 	ssize_t r;
-	int srcu_idx;
 
-	r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
-	if (likely(!r))
-		r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
-					blob->size);
-	debugfs_use_file_finish(srcu_idx);
+	r = debugfs_file_get(dentry);
+	if (unlikely(r))
+		return r;
+	r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
+				blob->size);
+	debugfs_file_put(dentry);
 	return r;
 }
 
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 3d7de9f..f7f672d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -27,14 +27,11 @@
 #include <linux/parser.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
-#include <linux/srcu.h>
 
 #include "internal.h"
 
 #define DEBUGFS_DEFAULT_MODE	0700
 
-DEFINE_SRCU(debugfs_srcu);
-
 static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
 static bool debugfs_registered;
@@ -185,6 +182,14 @@
 	.evict_inode	= debugfs_evict_inode,
 };
 
+static void debugfs_release_dentry(struct dentry *dentry)
+{
+	void *fsd = dentry->d_fsdata;
+
+	if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
+		kfree(dentry->d_fsdata);
+}
+
 static struct vfsmount *debugfs_automount(struct path *path)
 {
 	debugfs_automount_t f;
@@ -194,6 +199,7 @@
 
 static const struct dentry_operations debugfs_dops = {
 	.d_delete = always_delete_dentry,
+	.d_release = debugfs_release_dentry,
 	.d_automount = debugfs_automount,
 };
 
@@ -324,7 +330,8 @@
 	inode->i_private = data;
 
 	inode->i_fop = proxy_fops;
-	dentry->d_fsdata = (void *)real_fops;
+	dentry->d_fsdata = (void *)((unsigned long)real_fops |
+				DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
 
 	d_instantiate(dentry, inode);
 	fsnotify_create(d_inode(dentry->d_parent), dentry);
@@ -581,18 +588,43 @@
 }
 EXPORT_SYMBOL_GPL(debugfs_create_symlink);
 
+static void __debugfs_remove_file(struct dentry *dentry, struct dentry *parent)
+{
+	struct debugfs_fsdata *fsd;
+
+	simple_unlink(d_inode(parent), dentry);
+	d_delete(dentry);
+
+	/*
+	 * Paired with the closing smp_mb() implied by a successful
+	 * cmpxchg() in debugfs_file_get(): either
+	 * debugfs_file_get() must see a dead dentry or we must see a
+	 * debugfs_fsdata instance at ->d_fsdata here (or both).
+	 */
+	smp_mb();
+	fsd = READ_ONCE(dentry->d_fsdata);
+	if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+		return;
+	if (!refcount_dec_and_test(&fsd->active_users))
+		wait_for_completion(&fsd->active_users_drained);
+}
+
 static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
 {
 	int ret = 0;
 
 	if (simple_positive(dentry)) {
 		dget(dentry);
-		if (d_is_dir(dentry))
-			ret = simple_rmdir(d_inode(parent), dentry);
-		else
-			simple_unlink(d_inode(parent), dentry);
-		if (!ret)
-			d_delete(dentry);
+		if (!d_is_reg(dentry)) {
+			if (d_is_dir(dentry))
+				ret = simple_rmdir(d_inode(parent), dentry);
+			else
+				simple_unlink(d_inode(parent), dentry);
+			if (!ret)
+				d_delete(dentry);
+		} else {
+			__debugfs_remove_file(dentry, parent);
+		}
 		dput(dentry);
 	}
 	return ret;
@@ -626,8 +658,6 @@
 	inode_unlock(d_inode(parent));
 	if (!ret)
 		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-
-	synchronize_srcu(&debugfs_srcu);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove);
 
@@ -701,8 +731,6 @@
 	if (!__debugfs_remove(child, parent))
 		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 	inode_unlock(d_inode(parent));
-
-	synchronize_srcu(&debugfs_srcu);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
 
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index b3e8443..49ba8dd 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -12,6 +12,8 @@
 #ifndef _DEBUGFS_INTERNAL_H_
 #define _DEBUGFS_INTERNAL_H_
 
+#include <linux/refcount.h>
+
 struct file_operations;
 
 /* declared over in file.c */
@@ -19,4 +21,18 @@
 extern const struct file_operations debugfs_open_proxy_file_operations;
 extern const struct file_operations debugfs_full_proxy_file_operations;
 
+struct debugfs_fsdata {
+	const struct file_operations *real_fops;
+	refcount_t active_users;
+	struct completion active_users_drained;
+};
+
+/*
+ * A dentry's ->d_fsdata either points to the real fops or to a
+ * dynamically allocated debugfs_fsdata instance.
+ * In order to distinguish between these two cases, a real fops
+ * pointer gets its lowest bit set.
+ */
+#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
+
 #endif /* _DEBUGFS_INTERNAL_H_ */
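
With the SRCU-based guards gone, every debugfs file method brackets access to its private data with debugfs_file_get()/debugfs_file_put(), as debugfs_attr_read() above does. A minimal sketch of that pattern for a hypothetical reader follows; the function name and the PAGE_SIZE buffer length are illustrative, and F_DENTRY() is the helper defined in file.c earlier in this diff.

static ssize_t my_debugfs_read(struct file *file, char __user *buf,
			       size_t len, loff_t *ppos)
{
	struct dentry *dentry = F_DENTRY(file);
	ssize_t ret;

	/* Pins the file's fsdata; fails with -EIO once the file is removed. */
	ret = debugfs_file_get(dentry);
	if (unlikely(ret))
		return ret;
	ret = simple_read_from_buffer(buf, len, ppos,
				      file->private_data, PAGE_SIZE);
	/* Drops the reference so a pending debugfs_remove() can drain. */
	debugfs_file_put(dentry);
	return ret;
}
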
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c6220a2..bf03a92 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -411,6 +411,7 @@
 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
+	bio->bi_dio_inode = dio->inode;
 	dio->bio_bdev = bio->bi_bdev;
 
 	if (sdio->submit_io) {
@@ -424,6 +425,18 @@
 	sdio->logical_offset_in_bio = 0;
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+	struct inode *inode = NULL;
+
+	if (bio == NULL)
+		return NULL;
+
+	inode = bio->bi_dio_inode;
+
+	return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
 /*
  * Release any resources in case of a failure
  */
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index cf390dc..5c5ff9f 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -284,8 +284,7 @@
 		iget_failed(ecryptfs_inode);
 		goto out;
 	}
-	unlock_new_inode(ecryptfs_inode);
-	d_instantiate(ecryptfs_dentry, ecryptfs_inode);
+	d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
 out:
 	return rc;
 }
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 41b8b44..85449a6 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1258,21 +1258,11 @@
 
 static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
 {
-	/*
-	 * XXX: it seems like a bug here that we don't allow
-	 * IS_APPEND inode to have blocks-past-i_size trimmed off.
-	 * review and fix this.
-	 *
-	 * Also would be nice to be able to handle IO errors and such,
-	 * but that's probably too much to ask.
-	 */
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 	    S_ISLNK(inode->i_mode)))
 		return;
 	if (ext2_inode_is_fast_symlink(inode))
 		return;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return;
 
 	dax_sem_down_write(EXT2_I(inode));
 	__ext2_truncate_blocks(inode, offset);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 814e405..c8efc5e 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -40,8 +40,7 @@
 {
 	int err = ext2_add_link(dentry, inode);
 	if (!err) {
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	inode_dec_link_count(inode);
@@ -268,8 +267,7 @@
 	if (err)
 		goto out_fail;
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 out:
 	return err;
 
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..e9232a0 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -109,10 +109,16 @@
 	  decrypted pages in the page cache.
 
 config EXT4_FS_ENCRYPTION
-	bool
-	default y
+	bool "Ext4 FS Encryption"
+	default n
 	depends on EXT4_ENCRYPTION
 
+config EXT4_FS_ICE_ENCRYPTION
+	bool "Ext4 Encryption with ICE support"
+	default n
+	depends on EXT4_FS_ENCRYPTION
+	depends on PFK
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f..b9dfa0d 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux ext4-filesystem routines.
 #
+ccflags-y += -Ifs/crypto
 
 obj-$(CONFIG_EXT4_FS) += ext4.o
 
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 176b4b2..6776f4a 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -320,6 +320,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t offset;
 	ext4_grpblk_t next_zero_bit;
+	ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
 	ext4_fsblk_t blk;
 	ext4_fsblk_t group_first_block;
 
@@ -337,20 +338,25 @@
 	/* check whether block bitmap block number is set */
 	blk = ext4_block_bitmap(sb, desc);
 	offset = blk - group_first_block;
-	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
 		/* bad block bitmap */
 		return blk;
 
 	/* check whether the inode bitmap block number is set */
 	blk = ext4_inode_bitmap(sb, desc);
 	offset = blk - group_first_block;
-	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
 		/* bad block bitmap */
 		return blk;
 
 	/* check whether the inode table block number is set */
 	blk = ext4_inode_table(sb, desc);
 	offset = blk - group_first_block;
+	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+		return blk;
 	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
 			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
 			EXT4_B2C(sbi, offset));
@@ -416,6 +422,7 @@
 ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
 {
 	struct ext4_group_desc *desc;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct buffer_head *bh;
 	ext4_fsblk_t bitmap_blk;
 	int err;
@@ -424,6 +431,12 @@
 	if (!desc)
 		return ERR_PTR(-EFSCORRUPTED);
 	bitmap_blk = ext4_block_bitmap(sb, desc);
+	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+		ext4_error(sb, "Invalid block bitmap block %llu in "
+			   "block_group %u", bitmap_blk, block_group);
+		return ERR_PTR(-EFSCORRUPTED);
+	}
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
 		ext4_error(sb, "Cannot get buffer for block bitmap - "
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a8573fa..fb9ae76 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5358,8 +5358,9 @@
 	stop = le32_to_cpu(extent->ee_block);
 
        /*
-	 * In case of left shift, Don't start shifting extents until we make
-	 * sure the hole is big enough to accommodate the shift.
+	* For left shifts, make sure the hole on the left is big enough to
+	* accommodate the shift.  For right shifts, make sure the last extent
+	* won't be shifted beyond EXT_MAX_BLOCKS.
 	*/
 	if (SHIFT == SHIFT_LEFT) {
 		path = ext4_find_extent(inode, start - 1, &path,
@@ -5379,9 +5380,14 @@
 
 		if ((start == ex_start && shift > ex_start) ||
 		    (shift > start - ex_end)) {
-			ext4_ext_drop_refs(path);
-			kfree(path);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
+		}
+	} else {
+		if (shift > EXT_MAX_BLOCKS -
+		    (stop + ext4_ext_get_actual_len(extent))) {
+			ret = -EINVAL;
+			goto out;
 		}
 	}
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index ef76b83..1ee26da 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -119,6 +119,7 @@
 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 {
 	struct ext4_group_desc *desc;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct buffer_head *bh = NULL;
 	ext4_fsblk_t bitmap_blk;
 	int err;
@@ -128,6 +129,12 @@
 		return ERR_PTR(-EFSCORRUPTED);
 
 	bitmap_blk = ext4_inode_bitmap(sb, desc);
+	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+		ext4_error(sb, "Invalid inode bitmap blk %llu in "
+			   "block_group %u", bitmap_blk, block_group);
+		return ERR_PTR(-EFSCORRUPTED);
+	}
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
 		ext4_error(sb, "Cannot read inode bitmap - "
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2bf83d0..ecee29a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,7 @@
 #include "xattr.h"
 #include "acl.h"
 #include "truncate.h"
+//#include "ext4_ice.h"
 
 #include <trace/events/ext4.h>
 #include <trace/events/android_fs.h>
@@ -1152,7 +1153,8 @@
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
-				S_ISREG(inode->i_mode);
+				S_ISREG(inode->i_mode) &&
+				!fscrypt_using_hardware_encryption(inode);
 		}
 	}
 	/*
@@ -3509,8 +3511,9 @@
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+	WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+		&& !fscrypt_using_hardware_encryption(inode));
 #endif
 	if (IS_DAX(inode)) {
 		ret = dax_do_io(iocb, inode, iter, get_block_func,
@@ -3631,8 +3634,9 @@
 	ssize_t ret;
 	int rw = iov_iter_rw(iter);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+		&& !fscrypt_using_hardware_encryption(inode))
 		return 0;
 #endif
 
@@ -3828,7 +3832,8 @@
 		if (!buffer_uptodate(bh))
 			goto unlock;
 		if (S_ISREG(inode->i_mode) &&
-		    ext4_encrypted_inode(inode)) {
+		    ext4_encrypted_inode(inode) &&
+		    !fscrypt_using_hardware_encryption(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
 			BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 15ca15c..3585e26 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -774,8 +774,8 @@
 		return ext4_ext_precache(inode);
 
 	case EXT4_IOC_SET_ENCRYPTION_POLICY:
-		if (!ext4_has_feature_encrypt(sb))
-			return -EOPNOTSUPP;
+//		if (!ext4_has_feature_encrypt(sb))
+//			return -EOPNOTSUPP;
 		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 
 	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e3183e8..d536e0a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2387,8 +2387,7 @@
 	int err = ext4_add_entry(handle, dentry, inode);
 	if (!err) {
 		ext4_mark_inode_dirty(handle, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	drop_nlink(inode);
@@ -2627,8 +2626,7 @@
 	err = ext4_mark_inode_dirty(handle, dir);
 	if (err)
 		goto out_clear_inode;
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	if (IS_DIRSYNC(dir))
 		ext4_handle_sync(handle);
 
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0718a86..8d4ec1a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -28,6 +28,7 @@
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
+//#include "ext4_ice.h"
 
 static struct kmem_cache *io_end_cachep;
 
@@ -469,6 +470,7 @@
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
+	if (!fscrypt_using_hardware_encryption(inode))
 		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 						page->index, gfp_flags);
 		if (IS_ERR(data_page)) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e95b6e1..23c5716 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1180,6 +1180,11 @@
 		EXT4_NAME_LEN;
 }
 
+static inline bool ext4_is_encrypted(struct inode *inode)
+{
+	return ext4_encrypted_inode(inode);
+}
+
 static const struct fscrypt_operations ext4_cryptops = {
 	.key_prefix		= "ext4:",
 	.get_context		= ext4_get_context,
@@ -1187,6 +1192,7 @@
 	.dummy_context		= ext4_dummy_context,
 	.empty_dir		= ext4_empty_dir,
 	.max_namelen		= ext4_max_namelen,
+	.is_encrypted       = ext4_is_encrypted,
 };
 #endif
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b1fd4e2..9efe77e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -437,6 +437,7 @@
 	struct bio *bio;
 	struct page *page = fio->encrypted_page ?
 			fio->encrypted_page : fio->page;
+	struct inode *inode = fio->page->mapping->host;
 
 	verify_block_addr(fio, fio->new_blkaddr);
 	trace_f2fs_submit_page_bio(page, fio);
@@ -446,6 +447,9 @@
 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
 				1, is_read_io(fio->op), fio->type, fio->temp);
 
+	if (f2fs_may_encrypt_bio(inode, fio))
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
+
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
@@ -465,6 +469,9 @@
 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
 	struct page *bio_page;
+	struct inode *inode;
+	bool bio_encrypted;
+	u64 dun;
 	int err = 0;
 
 	f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -488,6 +495,9 @@
 	verify_block_addr(fio, fio->new_blkaddr);
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+	inode = fio->page->mapping->host;
+	dun = PG_DUN(inode, fio->page);
+	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
 
 	/* set submitted = true as a return value */
 	fio->submitted = true;
@@ -498,6 +508,11 @@
 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
 		__submit_merged_bio(io);
+
+	/* ICE support */
+	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
+		__submit_merged_bio(io);
+
 alloc_new:
 	if (io->bio == NULL) {
 		if ((fio->type == DATA || fio->type == NODE) &&
@@ -509,6 +524,9 @@
 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
 						BIO_MAX_PAGES, false,
 						fio->type, fio->temp);
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, io->bio, dun);
+
 		io->fio = *fio;
 	}
 
@@ -575,6 +593,9 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (f2fs_may_encrypt_bio(inode, NULL))
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
+
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
@@ -1434,6 +1455,8 @@
 	sector_t last_block_in_file;
 	sector_t block_nr;
 	struct f2fs_map_blocks map;
+	bool bio_encrypted;
+	u64 dun;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -1511,6 +1534,14 @@
 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
+
+		dun = PG_DUN(inode, page);
+		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
+			__submit_bio(F2FS_I_SB(inode), bio, DATA);
+			bio = NULL;
+		}
+
 		if (bio == NULL) {
 			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
 			if (IS_ERR(bio)) {
@@ -1518,7 +1549,8 @@
 				goto set_error_page;
 			}
 		}
-
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, bio, dun);
 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
 			goto submit_and_realloc;
 
@@ -1588,6 +1620,9 @@
 	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
 
 retry_encrypt:
+	if (fscrypt_using_hardware_encryption(inode))
+		return 0;
+
 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
 	if (!IS_ERR(fio->encrypted_page))
@@ -1903,7 +1938,13 @@
 
 redirty_out:
 	redirty_page_for_writepage(wbc, page);
-	if (!err)
+	/*
+	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
+	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
+	 * file_write_and_wait_range() will see the EIO error, which is critical
+	 * to the return value of fsync() after an atomic_write failure to user.
+	 */
+	if (!err || wbc->for_reclaim)
 		return AOP_WRITEPAGE_ACTIVATE;
 	unlock_page(page);
 	return err;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 84be1e7..65e8963 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -177,15 +177,12 @@
 #define	CP_DISCARD	0x00000010
 #define CP_TRIMMED	0x00000020
 
-#define DEF_BATCHED_TRIM_SECTIONS	2048
-#define BATCHED_TRIM_SEGMENTS(sbi)	\
-		(GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
-#define BATCHED_TRIM_BLOCKS(sbi)	\
-		(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
+#define DEF_MAX_DISCARD_LEN		512	/* Max. 2MB per discard */
 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
+#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
 #define DEF_CP_INTERVAL			60	/* 60 secs */
 #define DEF_IDLE_INTERVAL		5	/* 5 secs */
 
@@ -692,7 +689,8 @@
 static inline bool __is_discard_mergeable(struct discard_info *back,
 						struct discard_info *front)
 {
-	return back->lstart + back->len == front->lstart;
+	return (back->lstart + back->len == front->lstart) &&
+		(back->len + front->len < DEF_MAX_DISCARD_LEN);
 }
 
 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
@@ -1078,6 +1076,7 @@
 enum fsync_mode {
 	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
 	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
+	FSYNC_MODE_NOBARRIER,	/* fsync follows posix semantics, without flush */
 };
 
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
@@ -2802,8 +2801,6 @@
 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
-void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
-						unsigned int granularity);
 void drop_discard_cmd(struct f2fs_sb_info *sbi);
 void stop_discard_thread(struct f2fs_sb_info *sbi);
 bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
@@ -3321,9 +3318,20 @@
 
 static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
 {
-	return (f2fs_post_read_required(inode) ||
+	return ((f2fs_post_read_required(inode) &&
+		!fscrypt_using_hardware_encryption(inode)) ||
 			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
 			F2FS_I_SB(inode)->s_ndevs);
 }
 
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+		struct f2fs_io_info *fio)
+{
+	if (fio && (fio->type != DATA || fio->encrypted_page))
+		return false;
+
+	return (f2fs_encrypted_file(inode) &&
+			fscrypt_using_hardware_encryption(inode));
+}
+
 #endif
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 0e39c77..44a2e32 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -308,7 +308,7 @@
 	remove_ino_entry(sbi, ino, APPEND_INO);
 	clear_inode_flag(inode, FI_APPEND_WRITE);
 flush_out:
-	if (!atomic)
+	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
 		ret = f2fs_issue_flush(sbi, inode->i_ino);
 	if (!ret) {
 		remove_ino_entry(sbi, ino, UPDATE_INO);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 604d3fc..66044fa 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -693,6 +693,7 @@
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
 	set_page_writeback(fio.encrypted_page);
+	ClearPageError(page);
 
 	/* allocate block address */
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 26832e7..156ac4f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -157,6 +157,7 @@
 
 	/* write data page to try to make data consistent */
 	set_page_writeback(page);
+	ClearPageError(page);
 	fio.old_blkaddr = dn->data_blkaddr;
 	set_inode_flag(dn->inode, FI_HOT_DATA);
 	write_data_page(dn, &fio);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 392d1ed..f1e1ff1 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -294,8 +294,7 @@
 
 	alloc_nid_done(sbi, ino);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 	if (IS_DIRSYNC(dir))
 		f2fs_sync_fs(sbi->sb, 1);
@@ -597,8 +596,7 @@
 	err = page_symlink(inode, disk_link.name, disk_link.len);
 
 err_out:
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 	/*
 	 * Let's flush symlink data in order to avoid broken symlink as much as
@@ -661,8 +659,7 @@
 
 	alloc_nid_done(sbi, inode->i_ino);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 	if (IS_DIRSYNC(dir))
 		f2fs_sync_fs(sbi->sb, 1);
@@ -713,8 +710,7 @@
 
 	alloc_nid_done(sbi, inode->i_ino);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 	if (IS_DIRSYNC(dir))
 		f2fs_sync_fs(sbi->sb, 1);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ccf410a..803a010 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1398,6 +1398,7 @@
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
 	set_page_writeback(page);
+	ClearPageError(page);
 	fio.old_blkaddr = ni.blk_addr;
 	write_node_page(nid, &fio);
 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fc4ee38..98fe1ed 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -915,6 +915,39 @@
 #endif
 }
 
+static void __init_discard_policy(struct f2fs_sb_info *sbi,
+				struct discard_policy *dpolicy,
+				int discard_type, unsigned int granularity)
+{
+	/* common policy */
+	dpolicy->type = discard_type;
+	dpolicy->sync = true;
+	dpolicy->granularity = granularity;
+
+	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+	dpolicy->io_aware_gran = MAX_PLIST_NUM;
+
+	if (discard_type == DPOLICY_BG) {
+		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+		dpolicy->io_aware = true;
+		dpolicy->sync = false;
+		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
+			dpolicy->granularity = 1;
+			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		}
+	} else if (discard_type == DPOLICY_FORCE) {
+		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+		dpolicy->io_aware = false;
+	} else if (discard_type == DPOLICY_FSTRIM) {
+		dpolicy->io_aware = false;
+	} else if (discard_type == DPOLICY_UMOUNT) {
+		dpolicy->io_aware = false;
+	}
+}
+
+
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
 static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy,
@@ -1130,68 +1163,6 @@
 	return 0;
 }
 
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
-					struct discard_policy *dpolicy,
-					unsigned int start, unsigned int end)
-{
-	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
-	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
-	struct rb_node **insert_p = NULL, *insert_parent = NULL;
-	struct discard_cmd *dc;
-	struct blk_plug plug;
-	int issued;
-
-next:
-	issued = 0;
-
-	mutex_lock(&dcc->cmd_lock);
-	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
-
-	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
-					NULL, start,
-					(struct rb_entry **)&prev_dc,
-					(struct rb_entry **)&next_dc,
-					&insert_p, &insert_parent, true);
-	if (!dc)
-		dc = next_dc;
-
-	blk_start_plug(&plug);
-
-	while (dc && dc->lstart <= end) {
-		struct rb_node *node;
-
-		if (dc->len < dpolicy->granularity)
-			goto skip;
-
-		if (dc->state != D_PREP) {
-			list_move_tail(&dc->list, &dcc->fstrim_list);
-			goto skip;
-		}
-
-		__submit_discard_cmd(sbi, dpolicy, dc);
-
-		if (++issued >= dpolicy->max_requests) {
-			start = dc->lstart + dc->len;
-
-			blk_finish_plug(&plug);
-			mutex_unlock(&dcc->cmd_lock);
-
-			schedule();
-
-			goto next;
-		}
-skip:
-		node = rb_next(&dc->rb_node);
-		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
-
-		if (fatal_signal_pending(current))
-			break;
-	}
-
-	blk_finish_plug(&plug);
-	mutex_unlock(&dcc->cmd_lock);
-}
-
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy)
 {
@@ -1332,7 +1303,18 @@
 static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy)
 {
-	__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
+	struct discard_policy dp;
+
+	if (dpolicy) {
+		__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
+		return;
+	}
+
+	/* wait all */
+	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
+	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
+	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
 }
 
 /* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1377,11 +1359,13 @@
 	struct discard_policy dpolicy;
 	bool dropped;
 
-	init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
+					dcc->discard_granularity);
 	__issue_discard_cmd(sbi, &dpolicy);
 	dropped = __drop_discard_cmd(sbi);
-	__wait_all_discard_cmd(sbi, &dpolicy);
 
+	/* just to make sure there are no pending discard commands */
+	__wait_all_discard_cmd(sbi, NULL);
 	return dropped;
 }
 
@@ -1397,7 +1381,7 @@
 	set_freezable();
 
 	do {
-		init_discard_policy(&dpolicy, DPOLICY_BG,
+		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
 					dcc->discard_granularity);
 
 		wait_event_interruptible_timeout(*q,
@@ -1415,7 +1399,7 @@
 			dcc->discard_wake = 0;
 
 		if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
-			init_discard_policy(&dpolicy, DPOLICY_FORCE, 1);
+			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
 
 		sb_start_intwrite(sbi->sb);
 
@@ -1708,32 +1692,6 @@
 	wake_up_discard_thread(sbi, false);
 }
 
-void init_discard_policy(struct discard_policy *dpolicy,
-				int discard_type, unsigned int granularity)
-{
-	/* common policy */
-	dpolicy->type = discard_type;
-	dpolicy->sync = true;
-	dpolicy->granularity = granularity;
-
-	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
-	dpolicy->io_aware_gran = MAX_PLIST_NUM;
-
-	if (discard_type == DPOLICY_BG) {
-		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
-		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
-		dpolicy->io_aware = true;
-	} else if (discard_type == DPOLICY_FORCE) {
-		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
-		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
-		dpolicy->io_aware = false;
-	} else if (discard_type == DPOLICY_FSTRIM) {
-		dpolicy->io_aware = false;
-	} else if (discard_type == DPOLICY_UMOUNT) {
-		dpolicy->io_aware = false;
-	}
-}
-
 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
@@ -2373,11 +2331,72 @@
 	return has_candidate;
 }
 
+static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+					struct discard_policy *dpolicy,
+					unsigned int start, unsigned int end)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct discard_cmd *dc;
+	struct blk_plug plug;
+	int issued;
+
+next:
+	issued = 0;
+
+	mutex_lock(&dcc->cmd_lock);
+	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+
+	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+					NULL, start,
+					(struct rb_entry **)&prev_dc,
+					(struct rb_entry **)&next_dc,
+					&insert_p, &insert_parent, true);
+	if (!dc)
+		dc = next_dc;
+
+	blk_start_plug(&plug);
+
+	while (dc && dc->lstart <= end) {
+		struct rb_node *node;
+
+		if (dc->len < dpolicy->granularity)
+			goto skip;
+
+		if (dc->state != D_PREP) {
+			list_move_tail(&dc->list, &dcc->fstrim_list);
+			goto skip;
+		}
+
+		__submit_discard_cmd(sbi, dpolicy, dc);
+
+		if (++issued >= dpolicy->max_requests) {
+			start = dc->lstart + dc->len;
+
+			blk_finish_plug(&plug);
+			mutex_unlock(&dcc->cmd_lock);
+			__wait_all_discard_cmd(sbi, NULL);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			goto next;
+		}
+skip:
+		node = rb_next(&dc->rb_node);
+		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+
+		if (fatal_signal_pending(current))
+			break;
+	}
+
+	blk_finish_plug(&plug);
+	mutex_unlock(&dcc->cmd_lock);
+}
+
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 {
 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
-	unsigned int start_segno, end_segno, cur_segno;
+	unsigned int start_segno, end_segno;
 	block_t start_block, end_block;
 	struct cp_control cpc;
 	struct discard_policy dpolicy;
@@ -2403,40 +2422,27 @@
 
 	cpc.reason = CP_DISCARD;
 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
+	cpc.trim_start = start_segno;
+	cpc.trim_end = end_segno;
 
-	/* do checkpoint to issue discard commands safely */
-	for (cur_segno = start_segno; cur_segno <= end_segno;
-					cur_segno = cpc.trim_end + 1) {
-		cpc.trim_start = cur_segno;
+	if (sbi->discard_blks == 0)
+		goto out;
 
-		if (sbi->discard_blks == 0)
-			break;
-		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
-			cpc.trim_end = end_segno;
-		else
-			cpc.trim_end = min_t(unsigned int,
-				rounddown(cur_segno +
-				BATCHED_TRIM_SEGMENTS(sbi),
-				sbi->segs_per_sec) - 1, end_segno);
-
-		mutex_lock(&sbi->gc_mutex);
-		err = write_checkpoint(sbi, &cpc);
-		mutex_unlock(&sbi->gc_mutex);
-		if (err)
-			break;
-
-		schedule();
-	}
+	mutex_lock(&sbi->gc_mutex);
+	err = write_checkpoint(sbi, &cpc);
+	mutex_unlock(&sbi->gc_mutex);
+	if (err)
+		goto out;
 
 	start_block = START_BLOCK(sbi, start_segno);
-	end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
+	end_block = START_BLOCK(sbi, end_segno + 1);
 
-	init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
 	__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
 	trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
 					start_block, end_block);
-out:
 	range->len = F2FS_BLK_TO_BYTES(trimmed);
+out:
 	return err;
 }
 
@@ -2758,6 +2764,7 @@
 		fio.op_flags &= ~REQ_META;
 
 	set_page_writeback(page);
+	ClearPageError(page);
 	f2fs_submit_page_write(&fio);
 
 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
@@ -3823,8 +3830,6 @@
 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
 	sm_info->min_ssr_sections = reserved_sections(sbi);
 
-	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
-
 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
 
 	init_rwsem(&sm_info->curseg_lock);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7d4621a..a7711d7 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -740,6 +740,10 @@
 			} else if (strlen(name) == 6 &&
 					!strncmp(name, "strict", 6)) {
 				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
+			} else if (strlen(name) == 9 &&
+					!strncmp(name, "nobarrier", 9)) {
+				F2FS_OPTION(sbi).fsync_mode =
+							FSYNC_MODE_NOBARRIER;
 			} else {
 				kfree(name);
 				return -EINVAL;
@@ -1936,6 +1940,11 @@
 			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
 }
 
+static inline bool f2fs_is_encrypted(struct inode *inode)
+{
+	return f2fs_encrypted_file(inode);
+}
+
 static const struct fscrypt_operations f2fs_cryptops = {
 	.key_prefix	= "f2fs:",
 	.get_context	= f2fs_get_context,
@@ -1943,6 +1952,7 @@
 	.dummy_context	= f2fs_dummy_context,
 	.empty_dir	= f2fs_empty_dir,
 	.max_namelen	= f2fs_max_namelen,
+	.is_encrypted = f2fs_is_encrypted,
 };
 #endif
 
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index f33a56d..2c53de9 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -245,6 +245,9 @@
 		return count;
 	}
 
+	if (!strcmp(a->attr.name, "trim_sections"))
+		return -EINVAL;
+
 	*ui = t;
 
 	if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5af226f..17ad41d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1942,7 +1942,7 @@
 	}
 
 	if (!list_empty(&wb->work_list))
-		mod_delayed_work(bdi_wq, &wb->dwork, 0);
+		wb_wakeup(wb);
 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
 		wb_wakeup_delayed(wb);
 
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index c8c4f79..8a7923a 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -776,6 +776,7 @@
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
 
+again:
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 
@@ -816,10 +817,6 @@
 		goto superseded;
 	page = results[0];
 	_debug("gang %d [%lx]", n, page->index);
-	if (page->index >= op->store_limit) {
-		fscache_stat(&fscache_n_store_pages_over_limit);
-		goto superseded;
-	}
 
 	radix_tree_tag_set(&cookie->stores, page->index,
 			   FSCACHE_COOKIE_STORING_TAG);
@@ -829,6 +826,9 @@
 	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
+	if (page->index >= op->store_limit)
+		goto discard_page;
+
 	fscache_stat(&fscache_n_store_pages);
 	fscache_stat(&fscache_n_cop_write_page);
 	ret = object->cache->ops->write_page(op, page);
@@ -844,6 +844,11 @@
 	_leave("");
 	return;
 
+discard_page:
+	fscache_stat(&fscache_n_store_pages_over_limit);
+	fscache_end_page_write(object, page);
+	goto again;
+
 superseded:
 	/* this writer is going away and there aren't any more things to
 	 * write */
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 39c382f..ff93e96 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -801,7 +801,7 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
-	loff_t bytes, max_bytes, max_blks = UINT_MAX;
+	loff_t bytes, max_bytes, max_blks;
 	int error;
 	const loff_t pos = offset;
 	const loff_t count = len;
@@ -853,7 +853,8 @@
 			return error;
 		/* ap.allowed tells us how many blocks quota will allow
 		 * us to write. Check if this reduces max_blks */
-		if (ap.allowed && ap.allowed < max_blks)
+		max_blks = UINT_MAX;
+		if (ap.allowed)
 			max_blks = ap.allowed;
 
 		error = gfs2_inplace_reserve(ip, &ap);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 5e47c93..836f294 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -45,6 +45,8 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	int ret;
+
+	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 	ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 11854dd..b9563cd 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -588,6 +588,7 @@
 	return 0;
 
 out_put_hidden_dir:
+	cancel_delayed_work_sync(&sbi->sync_work);
 	iput(sbi->hidden_dir);
 out_put_root:
 	dput(sb->s_root);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4e5c610..9e9e093 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -528,6 +528,7 @@
 	 */
 	ret = start_this_handle(journal, handle, GFP_NOFS);
 	if (ret < 0) {
+		handle->h_journal = journal;
 		jbd2_journal_free_reserved(handle);
 		return ret;
 	}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 0a754f3..e5a6deb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -209,8 +209,7 @@
 		  __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
 		  f->inocache->pino_nlink, inode->i_mapping->nrpages);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
@@ -430,8 +429,7 @@
 	mutex_unlock(&dir_f->sem);
 	jffs2_complete_reservation(c);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
@@ -575,8 +573,7 @@
 	mutex_unlock(&dir_f->sem);
 	jffs2_complete_reservation(c);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
@@ -747,8 +744,7 @@
 	mutex_unlock(&dir_f->sem);
 	jffs2_complete_reservation(c);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
  fail:
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 567653f..c9c47d0 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -361,7 +361,6 @@
 	ret = -EIO;
 error:
 	mutex_unlock(&f->sem);
-	jffs2_do_clear_inode(c, f);
 	iget_failed(inode);
 	return ERR_PTR(ret);
 }
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b41596d..56c3fcb 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -178,8 +178,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out2:
@@ -313,8 +312,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out2:
@@ -1059,8 +1057,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out2:
@@ -1447,8 +1444,7 @@
 		unlock_new_inode(ip);
 		iput(ip);
 	} else {
-		unlock_new_inode(ip);
-		d_instantiate(dentry, ip);
+		d_instantiate_new(dentry, ip);
 	}
 
       out1:
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 4d51259..d484c63 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -274,6 +274,8 @@
 	if (ln->nlmsvc_users) {
 		if (--ln->nlmsvc_users == 0) {
 			nlm_shutdown_hosts_net(net);
+			cancel_delayed_work_sync(&ln->grace_period_end);
+			locks_end_grace(&ln->lockd_manager);
 			svc_shutdown_net(serv, net);
 			dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
 		}
diff --git a/fs/namei.c b/fs/namei.c
index a5a05d3..c138ab1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2905,6 +2905,11 @@
 	if (error)
 		return error;
 	error = dir->i_op->create(dir, dentry, mode, want_excl);
+	if (error)
+		return error;
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
@@ -3720,6 +3725,13 @@
 		return error;
 
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
+	if (error)
+		return error;
+
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
+
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1b1b616..91e017c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1934,7 +1934,7 @@
 	return ret;
 }
 
-static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
+static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
 {
 	switch (err) {
 		default:
@@ -1981,7 +1981,11 @@
 			return -EAGAIN;
 		case -ENOMEM:
 		case -NFS4ERR_DENIED:
-			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
+			if (fl) {
+				struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
+				if (lsp)
+					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
+			}
 			return 0;
 	}
 	return err;
@@ -2017,7 +2021,7 @@
 		err = nfs4_open_recover_helper(opendata, FMODE_READ);
 	}
 	nfs4_opendata_put(opendata);
-	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+	return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
 }
 
 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
@@ -6499,7 +6503,7 @@
 	if (err != 0)
 		return err;
 	err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
-	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
 }
 
 struct nfs_release_lockowner_data {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0bb0e62..3536913 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1429,6 +1429,7 @@
 	struct inode *inode = state->inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct file_lock *fl;
+	struct nfs4_lock_state *lsp;
 	int status = 0;
 	struct file_lock_context *flctx = inode->i_flctx;
 	struct list_head *list;
@@ -1469,7 +1470,9 @@
 		case -NFS4ERR_DENIED:
 		case -NFS4ERR_RECLAIM_BAD:
 		case -NFS4ERR_RECLAIM_CONFLICT:
-			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
+			lsp = fl->fl_u.nfs4_fl.owner;
+			if (lsp)
+				set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
 			status = 0;
 		}
 		spin_lock(&flctx->flc_lock);
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 8693d77..76241aa 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -31,7 +31,7 @@
 		.data = &nfs_idmap_cache_timeout,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_jiffies,
+		.proc_handler = proc_dointvec,
 	},
 	{ }
 };
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 2b71c60..1631318 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -46,8 +46,7 @@
 	int err = nilfs_add_link(dentry, inode);
 
 	if (!err) {
-		d_instantiate(dentry, inode);
-		unlock_new_inode(inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	inode_dec_link_count(inode);
@@ -243,8 +242,7 @@
 		goto out_fail;
 
 	nilfs_mark_inode_dirty(inode);
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 out:
 	if (!err)
 		err = nilfs_transaction_commit(dir->i_sb);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index bed1fcb..ee8dbba 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -314,7 +314,9 @@
 		return ERR_PTR(ret);
 	}
 
+	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	acl = ocfs2_get_acl_nolock(inode, type, di_bh);
+	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
 	ocfs2_inode_unlock(inode, 0);
 	brelse(di_bh);
@@ -333,7 +335,9 @@
 	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
 		return 0;
 
+	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 	if (IS_ERR(acl) || !acl)
 		return PTR_ERR(acl);
 	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
@@ -364,8 +368,10 @@
 
 	if (!S_ISLNK(inode->i_mode)) {
 		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+			down_read(&OCFS2_I(dir)->ip_xattr_sem);
 			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
 						   dir_bh);
+			up_read(&OCFS2_I(dir)->ip_xattr_sem);
 			if (IS_ERR(acl))
 				return PTR_ERR(acl);
 		}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 733e4e7..73be0c6 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -675,20 +675,6 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-int dlm_shutting_down(struct dlm_ctxt *dlm)
-{
-	int ret = 0;
-
-	spin_lock(&dlm_domain_lock);
-
-	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
-		ret = 1;
-
-	spin_unlock(&dlm_domain_lock);
-
-	return ret;
-}
-
 void dlm_unregister_domain(struct dlm_ctxt *dlm)
 {
 	int leave = 0;
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
index fd6122a..8a92814 100644
--- a/fs/ocfs2/dlm/dlmdomain.h
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -28,7 +28,30 @@
 extern spinlock_t dlm_domain_lock;
 extern struct list_head dlm_domains;
 
-int dlm_shutting_down(struct dlm_ctxt *dlm);
+static inline int dlm_joined(struct dlm_ctxt *dlm)
+{
+	int ret = 0;
+
+	spin_lock(&dlm_domain_lock);
+	if (dlm->dlm_state == DLM_CTXT_JOINED)
+		ret = 1;
+	spin_unlock(&dlm_domain_lock);
+
+	return ret;
+}
+
+static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
+{
+	int ret = 0;
+
+	spin_lock(&dlm_domain_lock);
+	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
+		ret = 1;
+	spin_unlock(&dlm_domain_lock);
+
+	return ret;
+}
+
 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
 					int node_num);
 
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index eef3248..844dc8d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1378,6 +1378,15 @@
 	if (!dlm_grab(dlm))
 		return -EINVAL;
 
+	if (!dlm_joined(dlm)) {
+		mlog(ML_ERROR, "Domain %s not joined! "
+			  "lockres %.*s, master %u\n",
+			  dlm->name, mres->lockname_len,
+			  mres->lockname, mres->master);
+		dlm_put(dlm);
+		return -EINVAL;
+	}
+
 	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
 
 	real_master = mres->master;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index a244f14..fa947d3 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -666,23 +666,24 @@
 	/* we can safely remove this assertion after testing. */
 	if (!buffer_uptodate(bh)) {
 		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
-		mlog(ML_ERROR, "b_blocknr=%llu\n",
-		     (unsigned long long)bh->b_blocknr);
+		mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
+		     (unsigned long long)bh->b_blocknr, bh->b_state);
 
 		lock_buffer(bh);
 		/*
-		 * A previous attempt to write this buffer head failed.
-		 * Nothing we can do but to retry the write and hope for
-		 * the best.
+		 * A previous transaction with a couple of buffer heads failed
+		 * to checkpoint, so all of its bhs are marked as BH_Write_EIO.
+		 * For the current transaction, this bh is just one of the
+		 * error bhs handled by that previous transaction. We can't
+		 * simply clear its BH_Write_EIO and reuse it, since the other
+		 * bhs have not been written to disk yet and that would cause
+		 * metadata inconsistency. So we should set the fs read-only
+		 * to avoid further damage.
 		 */
 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
-			clear_buffer_write_io_error(bh);
-			set_buffer_uptodate(bh);
-		}
-
-		if (!buffer_uptodate(bh)) {
 			unlock_buffer(bh);
-			return -EIO;
+			return ocfs2_error(osb->sb, "A previous attempt to "
+					"write this buffer head failed\n");
 		}
 		unlock_buffer(bh);
 	}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index f56fe39..64dfbe5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -473,9 +473,8 @@
 		new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
 		if (!new) {
 			ocfs2_release_system_inodes(osb);
-			status = -EINVAL;
+			status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
 			mlog_errno(status);
-			/* FIXME: Should ERROR_RO_FS */
 			mlog(ML_ERROR, "Unable to load system inode %d, "
 			     "possibly corrupt fs?", i);
 			goto bail;
@@ -504,7 +503,7 @@
 		new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
 		if (!new) {
 			ocfs2_release_system_inodes(osb);
-			status = -EINVAL;
+			status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
 			mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
 			     status, i, osb->slot_num);
 			goto bail;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index cb157a3..03f6ff2 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -638,9 +638,11 @@
 						     si->value_len);
 
 	if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+		down_read(&OCFS2_I(dir)->ip_xattr_sem);
 		acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
 					OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
 					"", NULL, 0);
+		up_read(&OCFS2_I(dir)->ip_xattr_sem);
 		if (acl_len > 0) {
 			a_size = ocfs2_xattr_entry_real_size(0, acl_len);
 			if (S_ISDIR(mode))
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 7c31593..561497a 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -70,8 +70,7 @@
 		     get_khandle_from_ino(inode),
 		     dentry);
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	orangefs_set_timeout(dentry);
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
@@ -318,8 +317,7 @@
 		     "Assigned symlink inode new number of %pU\n",
 		     get_khandle_from_ino(inode));
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	orangefs_set_timeout(dentry);
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
@@ -382,8 +380,7 @@
 		     "Assigned dir inode new number of %pU\n",
 		     get_khandle_from_ino(inode));
 
-	d_instantiate(dentry, inode);
-	unlock_new_inode(inode);
+	d_instantiate_new(dentry, inode);
 	orangefs_set_timeout(dentry);
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 794b52a..94f83e7 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -80,6 +80,7 @@
 #include <linux/delayacct.h>
 #include <linux/seq_file.h>
 #include <linux/pid_namespace.h>
+#include <linux/prctl.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/string_helpers.h>
@@ -345,8 +346,32 @@
 {
 #ifdef CONFIG_SECCOMP
 	seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
-	seq_putc(m, '\n');
 #endif
+	seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+	case -EINVAL:
+		seq_printf(m, "unknown");
+		break;
+	case PR_SPEC_NOT_AFFECTED:
+		seq_printf(m, "not vulnerable");
+		break;
+	case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
+		seq_printf(m, "thread force mitigated");
+		break;
+	case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+		seq_printf(m, "thread mitigated");
+		break;
+	case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
+		seq_printf(m, "thread vulnerable");
+		break;
+	case PR_SPEC_DISABLE:
+		seq_printf(m, "globally mitigated");
+		break;
+	default:
+		seq_printf(m, "vulnerable");
+		break;
+	}
+	seq_putc(m, '\n');
 }
 
 static inline void task_context_switch_counts(struct seq_file *m,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index abe157a5..0f23b3b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -95,6 +95,8 @@
 #include "internal.h"
 #include "fd.h"
 
+#include "../../lib/kstrtox.h"
+
 /* NOTE:
  *	Implementing inode permission operations in /proc is almost
  *	certainly an error.  Permission checks need to happen during
@@ -253,7 +255,7 @@
 	 * Inherently racy -- command line shares address space
 	 * with code and data.
 	 */
-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
 	if (rv <= 0)
 		goto out_free_page;
 
@@ -271,7 +273,7 @@
 			int nr_read;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -306,7 +308,7 @@
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -355,7 +357,7 @@
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -971,7 +973,7 @@
 		max_len = min_t(size_t, PAGE_SIZE, count);
 		this_len = min(max_len, this_len);
 
-		retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+		retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
 
 		if (retval <= 0) {
 			ret = retval;
@@ -2063,8 +2065,33 @@
 static int dname_to_vma_addr(struct dentry *dentry,
 			     unsigned long *start, unsigned long *end)
 {
-	if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
+	const char *str = dentry->d_name.name;
+	unsigned long long sval, eval;
+	unsigned int len;
+
+	len = _parse_integer(str, 16, &sval);
+	if (len & KSTRTOX_OVERFLOW)
 		return -EINVAL;
+	if (sval != (unsigned long)sval)
+		return -EINVAL;
+	str += len;
+
+	if (*str != '-')
+		return -EINVAL;
+	str++;
+
+	len = _parse_integer(str, 16, &eval);
+	if (len & KSTRTOX_OVERFLOW)
+		return -EINVAL;
+	if (eval != (unsigned long)eval)
+		return -EINVAL;
+	str += len;
+
+	if (*str != '\0')
+		return -EINVAL;
+
+	*start = sval;
+	*end = eval;
 
 	return 0;
 }
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index df7e079..7ed961c 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -505,6 +505,10 @@
 			/* we have to zero-fill user buffer even if no read */
 			if (copy_to_user(buffer, buf, tsz))
 				return -EFAULT;
+		} else if (m->type == KCORE_USER) {
+			/* User page is handled prior to normal kernel page: */
+			if (copy_to_user(buffer, (char *)start, tsz))
+				return -EFAULT;
 		} else {
 			if (kern_addr_valid(start)) {
 				/*
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d4e37ac..847f234 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -660,7 +660,10 @@
 				    struct ctl_table *table)
 {
 	bool ret = true;
+
 	head = sysctl_head_grab(head);
+	if (IS_ERR(head))
+		return false;
 
 	if (S_ISLNK(table->mode)) {
 		/* It is not an error if we can not follow the link ignore it */
diff --git a/fs/proc/uid.c b/fs/proc/uid.c
index 3fd7b9f..b2bb085 100644
--- a/fs/proc/uid.c
+++ b/fs/proc/uid.c
@@ -174,7 +174,7 @@
 		return 0;
 
 	for (u = uid_base_stuff + (ctx->pos - 2);
-	     u <= uid_base_stuff + nents - 1; u++) {
+	     u < uid_base_stuff + nents; u++) {
 		if (!proc_fill_cache(file, ctx, u->name, u->len,
 				     proc_uident_instantiate, NULL, u))
 			break;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index e6a2b40..1ec728c 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -687,8 +687,7 @@
 	reiserfs_update_inode_transaction(inode);
 	reiserfs_update_inode_transaction(dir);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 
 out_failed:
@@ -771,8 +770,7 @@
 		goto out_failed;
 	}
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 
 out_failed:
@@ -871,8 +869,7 @@
 	/* the above add_entry did not update dir's stat data */
 	reiserfs_update_sd(&th, dir);
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 out_failed:
 	reiserfs_write_unlock(dir->i_sb);
@@ -1187,8 +1184,7 @@
 		goto out_failed;
 	}
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	retval = journal_end(&th);
 out_failed:
 	reiserfs_write_unlock(parent_dir->i_sb);
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index e9426a6..776d549 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -51,7 +51,6 @@
 	 * whether the base obbpath has been changed or not
 	 */
 	if (is_obbpath_invalid(dentry)) {
-		d_drop(dentry);
 		return 0;
 	}
 
@@ -65,7 +64,6 @@
 	if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
 		err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
 		if (err == 0) {
-			d_drop(dentry);
 			goto out;
 		}
 	}
@@ -73,14 +71,12 @@
 	spin_lock(&lower_dentry->d_lock);
 	if (d_unhashed(lower_dentry)) {
 		spin_unlock(&lower_dentry->d_lock);
-		d_drop(dentry);
 		err = 0;
 		goto out;
 	}
 	spin_unlock(&lower_dentry->d_lock);
 
 	if (parent_lower_dentry != lower_cur_parent_dentry) {
-		d_drop(dentry);
 		err = 0;
 		goto out;
 	}
@@ -94,7 +90,6 @@
 	}
 
 	if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
-		__d_drop(dentry);
 		err = 0;
 	}
 
@@ -113,7 +108,6 @@
 	if (inode) {
 		data = top_data_get(SDCARDFS_I(inode));
 		if (!data || data->abandoned) {
-			d_drop(dentry);
 			err = 0;
 		}
 		if (data)
@@ -131,6 +125,8 @@
 
 static void sdcardfs_d_release(struct dentry *dentry)
 {
+	if (!dentry || !dentry->d_fsdata)
+		return;
 	/* release and reset the lower paths */
 	if (has_graft_path(dentry))
 		sdcardfs_put_reset_orig_path(dentry);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 843fcd2..98051996 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -41,8 +41,6 @@
 
 void free_dentry_private_data(struct dentry *dentry)
 {
-	if (!dentry || !dentry->d_fsdata)
-		return;
 	kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
 	dentry->d_fsdata = NULL;
 }
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index e4fd3fb..30e0c43 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -316,7 +316,7 @@
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root) {
 		err = -ENOMEM;
-		goto out_iput;
+		goto out_sput;
 	}
 	d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
 
@@ -361,8 +361,7 @@
 	/* no longer needed: free_dentry_private_data(sb->s_root); */
 out_freeroot:
 	dput(sb->s_root);
-out_iput:
-	iput(inode);
+	sb->s_root = NULL;
 out_sput:
 	/* drop refs we took earlier */
 	atomic_dec(&lower_sb->s_active);
@@ -422,7 +421,7 @@
 {
 	struct sdcardfs_sb_info *sbi;
 
-	if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+	if (sb->s_magic == SDCARDFS_SUPER_MAGIC && sb->s_fs_info) {
 		sbi = SDCARDFS_SB(sb);
 		mutex_lock(&sdcardfs_super_list_lock);
 		list_del(&sbi->list);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 2d65e28..348b922 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -621,8 +621,7 @@
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 
 	return 0;
 }
@@ -732,8 +731,7 @@
 	inc_nlink(dir);
 	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4b1f6d5..12467ad 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2094,8 +2094,9 @@
 	bool lvid_open = false;
 
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
-	uopt.uid = INVALID_UID;
-	uopt.gid = INVALID_GID;
+	/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
+	uopt.uid = make_kuid(current_user_ns(), overflowuid);
+	uopt.gid = make_kgid(current_user_ns(), overflowgid);
 	uopt.umask = 0;
 	uopt.fmode = UDF_INVALID_MODE;
 	uopt.dmode = UDF_INVALID_MODE;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 8eca4ed..2109c07 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -38,8 +38,7 @@
 {
 	int err = ufs_add_link(dentry, inode);
 	if (!err) {
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
+		d_instantiate_new(dentry, inode);
 		return 0;
 	}
 	inode_dec_link_count(inode);
@@ -192,8 +191,7 @@
 	if (err)
 		goto out_fail;
 
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
+	d_instantiate_new(dentry, inode);
 	return 0;
 
 out_fail:
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 4ff499a..b2ab123 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -50,6 +50,13 @@
 
 	pag = xfs_perag_get(mp, agno);
 
+	/*
+	 * Force out the log.  This means any transactions that might have freed
+	 * space before we take the AGF buffer lock are now on disk, and the
+	 * volatile disk cache is flushed.
+	 */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
 	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
 	if (error || !agbp)
 		goto out_put_perag;
@@ -57,13 +64,6 @@
 	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
 
 	/*
-	 * Force out the log.  This means any transactions that might have freed
-	 * space before we took the AGF buffer lock are now on disk, and the
-	 * volatile disk cache is flushed.
-	 */
-	xfs_log_force(mp, XFS_LOG_SYNC);
-
-	/*
 	 * Look up the longest btree in the AGF and start with it.
 	 */
 	error = xfs_alloc_lookup_ge(cur, 0,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 362c6b4..1410835 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -846,22 +846,26 @@
 		if (error)
 			goto out_unlock;
 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
-		unsigned int blksize_mask = i_blocksize(inode) - 1;
+		unsigned int	blksize_mask = i_blocksize(inode) - 1;
+		loff_t		isize = i_size_read(inode);
 
-		new_size = i_size_read(inode) + len;
 		if (offset & blksize_mask || len & blksize_mask) {
 			error = -EINVAL;
 			goto out_unlock;
 		}
 
-		/* check the new inode size does not wrap through zero */
-		if (new_size > inode->i_sb->s_maxbytes) {
+		/*
+		 * New inode size must not exceed ->s_maxbytes, accounting for
+		 * possible signed overflow.
+		 */
+		if (inode->i_sb->s_maxbytes - isize < len) {
 			error = -EFBIG;
 			goto out_unlock;
 		}
+		new_size = isize + len;
 
 		/* Offset should be less than i_size */
-		if (offset >= i_size_read(inode)) {
+		if (offset >= isize) {
 			error = -EINVAL;
 			goto out_unlock;
 		}
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 288cc9e..f2d97b7 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -243,4 +243,7 @@
 #define atomic_long_inc_not_zero(l) \
 	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
+#define atomic_long_cond_read_acquire(v, c) \
+	ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c..f0d8b1c 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
  */
 
 /**
- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
  *			  argument and comparison of the previous
  *			  futex value with another constant.
  *
@@ -25,18 +25,11 @@
  * <0 - On error
  */
 static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval, ret;
 	u32 tmp;
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
 	preempt_disable();
 	pagefault_disable();
 
@@ -74,17 +67,9 @@
 	pagefault_enable();
 	preempt_enable();
 
-	if (ret == 0) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (ret == 0)
+		*oval = oldval;
+
 	return ret;
 }
 
@@ -126,18 +111,9 @@
 
 #else
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
 
 	pagefault_disable();
 
@@ -153,17 +129,9 @@
 
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = oldval;
+
 	return ret;
 }
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f6ea0f3..4e8551c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -234,6 +234,21 @@
 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with the CPU setting these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	return old_pmd;
+}
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			    pmd_t *pmdp);
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7d026bf..c39a93a 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -26,30 +26,17 @@
 
 /*
  * Writer states & reader shift and bias.
- *
- *       | +0 | +1 | +2 | +3 |
- *   ----+----+----+----+----+
- *    LE | 78 | 56 | 34 | 12 | 0x12345678
- *   ----+----+----+----+----+
- *       | wr |      rd      |
- *       +----+----+----+----+
- *
- *   ----+----+----+----+----+
- *    BE | 12 | 34 | 56 | 78 | 0x12345678
- *   ----+----+----+----+----+
- *       |      rd      | wr |
- *       +----+----+----+----+
  */
-#define	_QW_WAITING	1		/* A writer is waiting	   */
-#define	_QW_LOCKED	0xff		/* A writer holds the lock */
-#define	_QW_WMASK	0xff		/* Writer mask		   */
-#define	_QR_SHIFT	8		/* Reader count shift	   */
+#define	_QW_WAITING	0x100		/* A writer is waiting	   */
+#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
+#define	_QW_WMASK	0x1ff		/* Writer mask		   */
+#define	_QR_SHIFT	9		/* Reader count shift	   */
 #define _QR_BIAS	(1U << _QR_SHIFT)
 
 /*
  * External function declarations
  */
-extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
@@ -118,7 +105,7 @@
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queued_read_lock_slowpath(lock, cnts);
+	queued_read_lock_slowpath(lock);
 }
 
 /**
@@ -147,22 +134,12 @@
 }
 
 /**
- * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
- * @lock : Pointer to queue rwlock structure
- * Return: the write byte address of a queue rwlock
- */
-static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
-{
-	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
-}
-
-/**
  * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release(__qrwlock_write_byte(lock), 0);
+	smp_store_release(&lock->wlocked, 0);
 }
 
 /*
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 0abc6b6..8af752a 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -9,12 +9,23 @@
  */
 
 typedef struct qrwlock {
-	atomic_t		cnts;
+	union {
+		atomic_t cnts;
+		struct {
+#ifdef __LITTLE_ENDIAN
+			u8 wlocked;	/* Locked for write? */
+			u8 __lstate[3];
+#else
+			u8 __lstate[3];
+			u8 wlocked;	/* Locked for write? */
+#endif
+		};
+	};
 	arch_spinlock_t		wait_lock;
 } arch_rwlock_t;
 
 #define	__ARCH_RW_LOCK_UNLOCKED {		\
-	.cnts = ATOMIC_INIT(0),			\
+	{ .cnts = ATOMIC_INIT(0), },		\
 	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 }
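
With the wlocked byte now a named union member and the writer state packed into the low nine bits of cnts, decoding the counter word no longer needs endian games. An illustrative pair of helpers under the new constants (a sketch, not part of the patch):

    /* Sketch only: decode the qrwlock counter word under the new layout. */
    static inline bool qrwlock_writer_pending(struct qrwlock *lock)
    {
    	return atomic_read(&lock->cnts) & _QW_WMASK;	/* locked or waiting */
    }

    static inline u32 qrwlock_reader_count(struct qrwlock *lock)
    {
    	return atomic_read(&lock->cnts) >> _QR_SHIFT;	/* readers start at bit 9 */
    }
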
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8236dbd..3c3519b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -172,7 +172,7 @@
 #endif
 
 #ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN();			\
+#define EARLYCON_TABLE() . = ALIGN(8);				\
 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
 			 *(__earlycon_table)			\
 			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
diff --git a/include/dt-bindings/arm/arm-smmu.h b/include/dt-bindings/arm/arm-smmu.h
index 1de45a9..3a1dbd3 100644
--- a/include/dt-bindings/arm/arm-smmu.h
+++ b/include/dt-bindings/arm/arm-smmu.h
@@ -23,6 +23,5 @@
 #define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
 #define ARM_SMMU_OPT_STATIC_CB          (1 << 8)
 #define ARM_SMMU_OPT_HALT               (1 << 9)
-#define ARM_SMMU_OPT_HIBERNATION	(1 << 10)
 
 #endif
diff --git a/include/dt-bindings/clock/msm-clocks-8952.h b/include/dt-bindings/clock/msm-clocks-8952.h
index 3190d4f..e66c5ed 100644
--- a/include/dt-bindings/clock/msm-clocks-8952.h
+++ b/include/dt-bindings/clock/msm-clocks-8952.h
@@ -237,8 +237,10 @@
 
 #define clk_dsi0pll_byte_clk_src		0xbbaa30be
 #define clk_dsi0pll_pixel_clk_src		0x45b3260f
+#define clk_dsi0pll_vco_clk			0x15940d40
 #define clk_dsi1pll_byte_clk_src		0x63930a8f
 #define clk_dsi1pll_pixel_clk_src		0x0e4c9b56
+#define clk_dsi1pll_vco_clk			0x99797b50
 
 #define clk_dsi_pll0_byte_clk_src		0x44539836
 #define clk_dsi_pll0_pixel_clk_src		0x5767c287
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index c8696df..6da90d0 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,205 +14,213 @@
 #ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
 
+/* Dummy clocks for rate measurement */
+#define MEASURE_ONLY_SNOC_CLK					0
+#define MEASURE_ONLY_CNOC_CLK					1
+#define MEASURE_ONLY_BIMC_CLK					2
+#define MEASURE_ONLY_IPA_2X_CLK					3
+#define UFS_PHY_AXI_EMMC_VOTE_CLK				4
+#define UFS_PHY_AXI_UFS_VOTE_CLK				5
+
 /* GCC clock registers */
-#define GCC_AGGRE_NOC_PCIE_TBU_CLK				0
-#define GCC_AGGRE_UFS_CARD_AXI_CLK				1
-#define GCC_AGGRE_UFS_PHY_AXI_CLK				2
-#define GCC_AGGRE_USB3_PRIM_AXI_CLK				3
-#define GCC_AGGRE_USB3_SEC_AXI_CLK				4
-#define GCC_BOOT_ROM_AHB_CLK					5
-#define GCC_CAMERA_AHB_CLK					6
-#define GCC_CAMERA_AXI_CLK					7
-#define GCC_CAMERA_XO_CLK					8
-#define GCC_CE1_AHB_CLK						9
-#define GCC_CE1_AXI_CLK						10
-#define GCC_CE1_CLK						11
-#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				12
-#define GCC_CFG_NOC_USB3_SEC_AXI_CLK				13
-#define GCC_CPUSS_AHB_CLK					14
-#define GCC_CPUSS_AHB_CLK_SRC					15
-#define GCC_CPUSS_DVM_BUS_CLK					16
-#define GCC_CPUSS_GNOC_CLK					17
-#define GCC_CPUSS_RBCPR_CLK					18
-#define GCC_CPUSS_RBCPR_CLK_SRC					19
-#define GCC_DDRSS_GPU_AXI_CLK					20
-#define GCC_DISP_AHB_CLK					21
-#define GCC_DISP_AXI_CLK					22
-#define GCC_DISP_GPLL0_CLK_SRC					23
-#define GCC_DISP_GPLL0_DIV_CLK_SRC				24
-#define GCC_DISP_XO_CLK						25
-#define GCC_GP1_CLK						26
-#define GCC_GP1_CLK_SRC						27
-#define GCC_GP2_CLK						28
-#define GCC_GP2_CLK_SRC						29
-#define GCC_GP3_CLK						30
-#define GCC_GP3_CLK_SRC						31
-#define GCC_GPU_CFG_AHB_CLK					32
-#define GCC_GPU_GPLL0_CLK_SRC					33
-#define GCC_GPU_GPLL0_DIV_CLK_SRC				34
-#define GCC_GPU_MEMNOC_GFX_CLK					35
-#define GCC_GPU_SNOC_DVM_GFX_CLK				36
-#define GCC_MSS_AXIS2_CLK					37
-#define GCC_MSS_CFG_AHB_CLK					38
-#define GCC_MSS_GPLL0_DIV_CLK_SRC				39
-#define GCC_MSS_MFAB_AXIS_CLK					40
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK				41
-#define GCC_MSS_SNOC_AXI_CLK					42
-#define GCC_PCIE_0_AUX_CLK					43
-#define GCC_PCIE_0_AUX_CLK_SRC					44
-#define GCC_PCIE_0_CFG_AHB_CLK					45
-#define GCC_PCIE_0_CLKREF_CLK					46
-#define GCC_PCIE_0_MSTR_AXI_CLK					47
-#define GCC_PCIE_0_PIPE_CLK					48
-#define GCC_PCIE_0_SLV_AXI_CLK					49
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				50
-#define GCC_PCIE_1_AUX_CLK					51
-#define GCC_PCIE_1_AUX_CLK_SRC					52
-#define GCC_PCIE_1_CFG_AHB_CLK					53
-#define GCC_PCIE_1_CLKREF_CLK					54
-#define GCC_PCIE_1_MSTR_AXI_CLK					55
-#define GCC_PCIE_1_PIPE_CLK					56
-#define GCC_PCIE_1_SLV_AXI_CLK					57
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				58
-#define GCC_PCIE_PHY_AUX_CLK					59
-#define GCC_PCIE_PHY_REFGEN_CLK					60
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				61
-#define GCC_PDM2_CLK						62
-#define GCC_PDM2_CLK_SRC					63
-#define GCC_PDM_AHB_CLK						64
-#define GCC_PDM_XO4_CLK						65
-#define GCC_PRNG_AHB_CLK					66
-#define GCC_QMIP_CAMERA_AHB_CLK					67
-#define GCC_QMIP_DISP_AHB_CLK					68
-#define GCC_QMIP_VIDEO_AHB_CLK					69
-#define GCC_QUPV3_WRAP0_S0_CLK					70
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				71
-#define GCC_QUPV3_WRAP0_S1_CLK					72
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				73
-#define GCC_QUPV3_WRAP0_S2_CLK					74
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_S3_CLK					76
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				77
-#define GCC_QUPV3_WRAP0_S4_CLK					78
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				79
-#define GCC_QUPV3_WRAP0_S5_CLK					80
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				81
-#define GCC_QUPV3_WRAP0_S6_CLK					82
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				83
-#define GCC_QUPV3_WRAP0_S7_CLK					84
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				85
-#define GCC_QUPV3_WRAP1_S0_CLK					86
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				87
-#define GCC_QUPV3_WRAP1_S1_CLK					88
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				89
-#define GCC_QUPV3_WRAP1_S2_CLK					90
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				91
-#define GCC_QUPV3_WRAP1_S3_CLK					92
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				93
-#define GCC_QUPV3_WRAP1_S4_CLK					94
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				95
-#define GCC_QUPV3_WRAP1_S5_CLK					96
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				97
-#define GCC_QUPV3_WRAP1_S6_CLK					98
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				99
-#define GCC_QUPV3_WRAP1_S7_CLK					100
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				101
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				102
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				103
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				104
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				105
-#define GCC_SDCC2_AHB_CLK					106
-#define GCC_SDCC2_APPS_CLK					107
-#define GCC_SDCC2_APPS_CLK_SRC					108
-#define GCC_SDCC4_AHB_CLK					109
-#define GCC_SDCC4_APPS_CLK					110
-#define GCC_SDCC4_APPS_CLK_SRC					111
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				112
-#define GCC_TSIF_AHB_CLK					113
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				114
-#define GCC_TSIF_REF_CLK					115
-#define GCC_TSIF_REF_CLK_SRC					116
-#define GCC_UFS_CARD_AHB_CLK					117
-#define GCC_UFS_CARD_AXI_CLK					118
-#define GCC_UFS_CARD_AXI_CLK_SRC				119
-#define GCC_UFS_CARD_CLKREF_CLK					120
-#define GCC_UFS_CARD_ICE_CORE_CLK				121
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				122
-#define GCC_UFS_CARD_PHY_AUX_CLK				123
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				124
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				125
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				126
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				127
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				128
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			129
-#define GCC_UFS_MEM_CLKREF_CLK					130
-#define GCC_UFS_PHY_AHB_CLK					131
-#define GCC_UFS_PHY_AXI_CLK					132
-#define GCC_UFS_PHY_AXI_CLK_SRC					133
-#define GCC_UFS_PHY_ICE_CORE_CLK				134
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				135
-#define GCC_UFS_PHY_PHY_AUX_CLK					136
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				137
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				138
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				139
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				140
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				141
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				142
-#define GCC_USB30_PRIM_MASTER_CLK				143
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				144
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				145
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			146
-#define GCC_USB30_PRIM_SLEEP_CLK				147
-#define GCC_USB30_SEC_MASTER_CLK				148
-#define GCC_USB30_SEC_MASTER_CLK_SRC				149
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				150
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				151
-#define GCC_USB30_SEC_SLEEP_CLK					152
-#define GCC_USB3_PRIM_CLKREF_CLK				153
-#define GCC_USB3_PRIM_PHY_AUX_CLK				154
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				155
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				156
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				157
-#define GCC_USB3_SEC_CLKREF_CLK					158
-#define GCC_USB3_SEC_PHY_AUX_CLK				159
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				160
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				161
-#define GCC_USB3_SEC_PHY_PIPE_CLK				162
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				163
-#define GCC_VIDEO_AHB_CLK					164
-#define GCC_VIDEO_AXI_CLK					165
-#define GCC_VIDEO_XO_CLK					166
-#define GPLL0							167
-#define GPLL0_OUT_EVEN						168
-#define GPLL0_OUT_MAIN						169
-#define GCC_UFS_CARD_AXI_HW_CTL_CLK				170
-#define GCC_UFS_PHY_AXI_HW_CTL_CLK				171
-#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			172
-#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			173
-#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			174
-#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				175
-#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			176
-#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			177
-#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				178
-#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				179
-#define GCC_GPU_IREF_CLK					180
-#define GCC_SDCC1_AHB_CLK					181
-#define GCC_SDCC1_APPS_CLK					182
-#define GCC_SDCC1_ICE_CORE_CLK					183
-#define GCC_SDCC1_APPS_CLK_SRC					184
-#define GCC_SDCC1_ICE_CORE_CLK_SRC				185
-#define GCC_APC_VS_CLK						186
-#define GCC_GPU_VS_CLK						187
-#define GCC_MSS_VS_CLK						188
-#define GCC_VDDA_VS_CLK						189
-#define GCC_VDDCX_VS_CLK					190
-#define GCC_VDDMX_VS_CLK					191
-#define GCC_VS_CTRL_AHB_CLK					192
-#define GCC_VS_CTRL_CLK						193
-#define GCC_VS_CTRL_CLK_SRC					194
-#define GCC_VSENSOR_CLK_SRC					195
-#define GPLL4							196
-#define GPLL6							197
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK				6
+#define GCC_AGGRE_UFS_CARD_AXI_CLK				7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK				8
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK				9
+#define GCC_AGGRE_USB3_SEC_AXI_CLK				10
+#define GCC_BOOT_ROM_AHB_CLK					11
+#define GCC_CAMERA_AHB_CLK					12
+#define GCC_CAMERA_AXI_CLK					13
+#define GCC_CAMERA_XO_CLK					14
+#define GCC_CE1_AHB_CLK						15
+#define GCC_CE1_AXI_CLK						16
+#define GCC_CE1_CLK						17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				18
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK				19
+#define GCC_CPUSS_AHB_CLK					20
+#define GCC_CPUSS_AHB_CLK_SRC					21
+#define GCC_CPUSS_DVM_BUS_CLK					22
+#define GCC_CPUSS_GNOC_CLK					23
+#define GCC_CPUSS_RBCPR_CLK					24
+#define GCC_CPUSS_RBCPR_CLK_SRC					25
+#define GCC_DDRSS_GPU_AXI_CLK					26
+#define GCC_DISP_AHB_CLK					27
+#define GCC_DISP_AXI_CLK					28
+#define GCC_DISP_GPLL0_CLK_SRC					29
+#define GCC_DISP_GPLL0_DIV_CLK_SRC				30
+#define GCC_DISP_XO_CLK						31
+#define GCC_GP1_CLK						32
+#define GCC_GP1_CLK_SRC						33
+#define GCC_GP2_CLK						34
+#define GCC_GP2_CLK_SRC						35
+#define GCC_GP3_CLK						36
+#define GCC_GP3_CLK_SRC						37
+#define GCC_GPU_CFG_AHB_CLK					38
+#define GCC_GPU_GPLL0_CLK_SRC					39
+#define GCC_GPU_GPLL0_DIV_CLK_SRC				40
+#define GCC_GPU_MEMNOC_GFX_CLK					41
+#define GCC_GPU_SNOC_DVM_GFX_CLK				42
+#define GCC_MSS_AXIS2_CLK					43
+#define GCC_MSS_CFG_AHB_CLK					44
+#define GCC_MSS_GPLL0_DIV_CLK_SRC				45
+#define GCC_MSS_MFAB_AXIS_CLK					46
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK				47
+#define GCC_MSS_SNOC_AXI_CLK					48
+#define GCC_PCIE_0_AUX_CLK					49
+#define GCC_PCIE_0_AUX_CLK_SRC					50
+#define GCC_PCIE_0_CFG_AHB_CLK					51
+#define GCC_PCIE_0_CLKREF_CLK					52
+#define GCC_PCIE_0_MSTR_AXI_CLK					53
+#define GCC_PCIE_0_PIPE_CLK					54
+#define GCC_PCIE_0_SLV_AXI_CLK					55
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				56
+#define GCC_PCIE_1_AUX_CLK					57
+#define GCC_PCIE_1_AUX_CLK_SRC					58
+#define GCC_PCIE_1_CFG_AHB_CLK					59
+#define GCC_PCIE_1_CLKREF_CLK					60
+#define GCC_PCIE_1_MSTR_AXI_CLK					61
+#define GCC_PCIE_1_PIPE_CLK					62
+#define GCC_PCIE_1_SLV_AXI_CLK					63
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				64
+#define GCC_PCIE_PHY_AUX_CLK					65
+#define GCC_PCIE_PHY_REFGEN_CLK					66
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				67
+#define GCC_PDM2_CLK						68
+#define GCC_PDM2_CLK_SRC					69
+#define GCC_PDM_AHB_CLK						70
+#define GCC_PDM_XO4_CLK						71
+#define GCC_PRNG_AHB_CLK					72
+#define GCC_QMIP_CAMERA_AHB_CLK					73
+#define GCC_QMIP_DISP_AHB_CLK					74
+#define GCC_QMIP_VIDEO_AHB_CLK					75
+#define GCC_QUPV3_WRAP0_S0_CLK					76
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				77
+#define GCC_QUPV3_WRAP0_S1_CLK					78
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				79
+#define GCC_QUPV3_WRAP0_S2_CLK					80
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				81
+#define GCC_QUPV3_WRAP0_S3_CLK					82
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				83
+#define GCC_QUPV3_WRAP0_S4_CLK					84
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				85
+#define GCC_QUPV3_WRAP0_S5_CLK					86
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				87
+#define GCC_QUPV3_WRAP0_S6_CLK					88
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				89
+#define GCC_QUPV3_WRAP0_S7_CLK					90
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				91
+#define GCC_QUPV3_WRAP1_S0_CLK					92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP1_S1_CLK					94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP1_S2_CLK					96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP1_S3_CLK					98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP1_S4_CLK					100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP1_S5_CLK					102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP1_S6_CLK					104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
+#define GCC_QUPV3_WRAP1_S7_CLK					106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
+#define GCC_SDCC2_AHB_CLK					112
+#define GCC_SDCC2_APPS_CLK					113
+#define GCC_SDCC2_APPS_CLK_SRC					114
+#define GCC_SDCC4_AHB_CLK					115
+#define GCC_SDCC4_APPS_CLK					116
+#define GCC_SDCC4_APPS_CLK_SRC					117
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				118
+#define GCC_TSIF_AHB_CLK					119
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				120
+#define GCC_TSIF_REF_CLK					121
+#define GCC_TSIF_REF_CLK_SRC					122
+#define GCC_UFS_CARD_AHB_CLK					123
+#define GCC_UFS_CARD_AXI_CLK					124
+#define GCC_UFS_CARD_AXI_CLK_SRC				125
+#define GCC_UFS_CARD_CLKREF_CLK					126
+#define GCC_UFS_CARD_ICE_CORE_CLK				127
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				128
+#define GCC_UFS_CARD_PHY_AUX_CLK				129
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				130
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				131
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				132
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				133
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				134
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			135
+#define GCC_UFS_MEM_CLKREF_CLK					136
+#define GCC_UFS_PHY_AHB_CLK					137
+#define GCC_UFS_PHY_AXI_CLK					138
+#define GCC_UFS_PHY_AXI_CLK_SRC					139
+#define GCC_UFS_PHY_ICE_CORE_CLK				140
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				141
+#define GCC_UFS_PHY_PHY_AUX_CLK					142
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				143
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				144
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				145
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				146
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				147
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				148
+#define GCC_USB30_PRIM_MASTER_CLK				149
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				150
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				151
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			152
+#define GCC_USB30_PRIM_SLEEP_CLK				153
+#define GCC_USB30_SEC_MASTER_CLK				154
+#define GCC_USB30_SEC_MASTER_CLK_SRC				155
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				156
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				157
+#define GCC_USB30_SEC_SLEEP_CLK					158
+#define GCC_USB3_PRIM_CLKREF_CLK				159
+#define GCC_USB3_PRIM_PHY_AUX_CLK				160
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				161
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				162
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				163
+#define GCC_USB3_SEC_CLKREF_CLK					164
+#define GCC_USB3_SEC_PHY_AUX_CLK				165
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				166
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				167
+#define GCC_USB3_SEC_PHY_PIPE_CLK				168
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				169
+#define GCC_VIDEO_AHB_CLK					170
+#define GCC_VIDEO_AXI_CLK					171
+#define GCC_VIDEO_XO_CLK					172
+#define GPLL0							173
+#define GPLL0_OUT_EVEN						174
+#define GPLL0_OUT_MAIN						175
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK				176
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK				177
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			178
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			179
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			180
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				181
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			182
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			183
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				184
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				185
+#define GCC_GPU_IREF_CLK					186
+#define GCC_SDCC1_AHB_CLK					187
+#define GCC_SDCC1_APPS_CLK					188
+#define GCC_SDCC1_ICE_CORE_CLK					189
+#define GCC_SDCC1_APPS_CLK_SRC					190
+#define GCC_SDCC1_ICE_CORE_CLK_SRC				191
+#define GCC_APC_VS_CLK						192
+#define GCC_GPU_VS_CLK						193
+#define GCC_MSS_VS_CLK						194
+#define GCC_VDDA_VS_CLK						195
+#define GCC_VDDCX_VS_CLK					196
+#define GCC_VDDMX_VS_CLK					197
+#define GCC_VS_CTRL_AHB_CLK					198
+#define GCC_VS_CTRL_CLK						199
+#define GCC_VS_CTRL_CLK_SRC					200
+#define GCC_VSENSOR_CLK_SRC					201
+#define GPLL4							202
+#define GPLL6							203
 
 /* GCC reset clocks */
 #define GCC_MMSS_BCR						0
@@ -243,10 +251,4 @@
 #define GCC_PCIE_1_PHY_BCR					25
 #define GCC_SDCC1_BCR						26
 
-/* Dummy clocks for rate measurement */
-#define MEASURE_ONLY_SNOC_CLK					0
-#define MEASURE_ONLY_CNOC_CLK					1
-#define MEASURE_ONLY_BIMC_CLK					2
-#define MEASURE_ONLY_IPA_2X_CLK					3
-
 #endif
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index 64e2dc7..7ac6f16 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -11,9 +11,14 @@
 #define PMIC_GPIO_PULL_UP_1P5_30	3
 
 #define PMIC_GPIO_STRENGTH_NO		0
-#define PMIC_GPIO_STRENGTH_HIGH		1
+#define PMIC_GPIO_STRENGTH_LOW		1
 #define PMIC_GPIO_STRENGTH_MED		2
-#define PMIC_GPIO_STRENGTH_LOW		3
+#define PMIC_GPIO_STRENGTH_HIGH		3
+
+#define PM8921_GPIO_STRENGTH_NO		0
+#define PM8921_GPIO_STRENGTH_HIGH	1
+#define PM8921_GPIO_STRENGTH_MED	2
+#define PM8921_GPIO_STRENGTH_LOW	3
 
 /*
  * Note: PM8018 GPIO3 and GPIO4 are supporting
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e518e4e..4b15481 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -37,10 +37,15 @@
 	 * Our PSCI implementation stays the same across versions from
 	 * v0.2 onward, only adding the few mandatory functions (such
 	 * as FEATURES with 1.0) that are required by newer
-	 * revisions. It is thus safe to return the latest.
+	 * revisions. It is thus safe to return the latest, unless
+	 * userspace has instructed us otherwise.
 	 */
-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+		if (vcpu->kvm->arch.psci_version)
+			return vcpu->kvm->arch.psci_version;
+
 		return KVM_ARM_PSCI_LATEST;
+	}
 
 	return KVM_ARM_PSCI_0_1;
 }
@@ -48,4 +53,11 @@
 
 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
 
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
 #endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e71835b..bc7dba0 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -627,6 +627,8 @@
 }
 #endif
 
+#define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
@@ -1023,6 +1025,8 @@
 }
 #endif
 
+#define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+
 #include <asm-generic/atomic-long.h>
 
 #endif /* _LINUX_ATOMIC_H */
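
atomic_cond_read_acquire() layers smp_cond_load_acquire() over the atomic_t counter: the caller spins until a predicate over the freshly loaded value (spelled VAL inside the macro) becomes true, and the final load carries acquire semantics. A sketch in the spirit of the qrwlock rework earlier in this patch:

    /* Sketch: wait, with acquire ordering, until no writer holds the qrwlock. */
    static inline void wait_for_writer(struct qrwlock *lock)
    {
    	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
    }
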
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 97cb48f..0885c9f 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,6 +61,9 @@
 	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
 #define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
 #define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+#define bio_dun(bio)		((bio)->bi_iter.bi_dun)
+#define bio_duns(bio)		(bio_sectors(bio) >> 3) /* 4KB unit */
+#define bio_end_dun(bio)	(bio_dun(bio) + bio_duns(bio))
 
 /*
  * Check whether this bio carries any data or not. A NULL bio is allowed.
@@ -169,6 +172,11 @@
 {
 	iter->bi_sector += bytes >> 9;
 
+#ifdef CONFIG_PFK
+	if (iter->bi_dun)
+		iter->bi_dun += bytes >> 12;
+#endif
+
 	if (bio_no_advance_iter(bio))
 		iter->bi_size -= bytes;
 	else
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index e4d84d3..5a3712c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -66,6 +66,10 @@
 		struct bio_integrity_payload *bi_integrity; /* data integrity */
 #endif
 	};
+#ifdef CONFIG_PFK
+	/* Encryption key to use (NULL if none) */
+	const struct blk_encryption_key	*bi_crypt_key;
+#endif
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
@@ -82,6 +86,12 @@
 	struct bio_set		*bi_pool;
 
 	/*
+	 * When using direct I/O (O_DIRECT), we can't get the inode from a bio
+	 * by walking bio->bi_io_vec->bv_page->mapping->host
+	 * since the page is anon.
+	 */
+	struct inode		*bi_dio_inode;
+	/*
 	 * We can inline a number of vecs at the end of the bio, to avoid
 	 * double allocations for a small number of bio_vecs. This member
 	 * MUST obviously be kept at the very end of the bio.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b0f981a..661dcec 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -509,6 +509,7 @@
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
 #define QUEUE_FLAG_FAST        27	/* fast block device (e.g. ram based) */
+#define QUEUE_FLAG_INLINECRYPT 28	/* inline encryption support */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -600,6 +601,8 @@
 	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 #define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
+#define blk_queue_inlinecrypt(q) \
+	test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 89b65b8..dbf1f2c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -41,6 +41,7 @@
 
 	unsigned int            bi_bvec_done;	/* number of bytes completed in
 						   current bvec */
+	u64			bi_dun;		/* DUN setting for bio */
 };
 
 /*
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 3a4f264..a8b4284 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -117,7 +117,7 @@
 #define CLOCK_SOURCE_RESELECT			0x100
 
 /* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
 
 static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
 {
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 27101bb..eba9285 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -207,6 +207,17 @@
 #endif
 #endif
 
+#ifdef CONFIG_STACK_VALIDATION
+#define annotate_unreachable() ({					\
+	asm("1:\t\n"							\
+	    ".pushsection __unreachable, \"a\"\t\n"			\
+	    ".long 1b\t\n"						\
+	    ".popsection\t\n");						\
+})
+#else
+#define annotate_unreachable()
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
@@ -216,7 +227,8 @@
  * this in the preprocessor, but we can live with this because they're
  * unreleased.  Really, we need to have autoconf for the kernel.
  */
-#define unreachable() __builtin_unreachable()
+#define unreachable() \
+	do { annotate_unreachable(); __builtin_unreachable(); } while (0)
 
 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1f7e4ec..912d945 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -50,6 +50,8 @@
 				   struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spectre_v2(struct device *dev,
 				   struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 45d5522..0fbce32 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -424,6 +424,7 @@
 #define CPUFREQ_START			(2)
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
+#define CPUFREQ_STOP			(5)
 
 /* Govinfo Notifiers */
 #define CPUFREQ_LOAD_CHANGE		(0)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bd96bb2..df17901 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -179,6 +179,8 @@
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_wrap(cpu, mask, start)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
 #define for_each_cpu_and(cpu, mask, and)	\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
 #else
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3d4a198..014d7f9 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -220,6 +220,7 @@
  * These are the low-level FS interfaces to the dcache..
  */
 extern void d_instantiate(struct dentry *, struct inode *);
+extern void d_instantiate_new(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
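
d_instantiate_new() is meant to replace the common d_instantiate() followed by unlock_new_inode() sequence in filesystem create paths, so a still-locked new inode is never exposed through the dcache. The typical conversion is a sketch like:

    /* before: two steps; the dentry becomes visible while the inode is still new/locked */
    d_instantiate(dentry, inode);
    unlock_new_inode(inode);

    /* after: one combined, safe step */
    d_instantiate_new(dentry, inode);
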
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index b20a094..2e75ae2 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -23,7 +23,6 @@
 
 struct device;
 struct file_operations;
-struct srcu_struct;
 
 struct debugfs_blob_wrapper {
 	void *data;
@@ -43,25 +42,6 @@
 
 extern struct dentry *arch_debugfs_dir;
 
-extern struct srcu_struct debugfs_srcu;
-
-/**
- * debugfs_real_fops - getter for the real file operation
- * @filp: a pointer to a struct file
- *
- * Must only be called under the protection established by
- * debugfs_use_file_start().
- */
-static inline const struct file_operations *debugfs_real_fops(struct file *filp)
-	__must_hold(&debugfs_srcu)
-{
-	/*
-	 * Neither the pointer to the struct file_operations, nor its
-	 * contents ever change -- srcu_dereference() is not needed here.
-	 */
-	return filp->f_path.dentry->d_fsdata;
-}
-
 #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
 static int __fops ## _open(struct inode *inode, struct file *file)	\
 {									\
@@ -105,10 +85,10 @@
 void debugfs_remove(struct dentry *dentry);
 void debugfs_remove_recursive(struct dentry *dentry);
 
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
-	__acquires(&debugfs_srcu);
+const struct file_operations *debugfs_real_fops(const struct file *filp);
 
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
+int debugfs_file_get(struct dentry *dentry);
+void debugfs_file_put(struct dentry *dentry);
 
 ssize_t debugfs_attr_read(struct file *file, char __user *buf,
 			size_t len, loff_t *ppos);
@@ -223,15 +203,12 @@
 static inline void debugfs_remove_recursive(struct dentry *dentry)
 { }
 
-static inline int debugfs_use_file_start(const struct dentry *dentry,
-					int *srcu_idx)
-	__acquires(&debugfs_srcu)
+static inline int debugfs_file_get(struct dentry *dentry)
 {
 	return 0;
 }
 
-static inline void debugfs_use_file_finish(int srcu_idx)
-	__releases(&debugfs_srcu)
+static inline void debugfs_file_put(struct dentry *dentry)
 { }
 
 static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
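
The SRCU-based debugfs_use_file_start()/debugfs_use_file_finish() pair gives way to per-file reference counting; callers now bracket their work with debugfs_file_get()/debugfs_file_put(). A hedged sketch of a read handler on the new API (handler name and payload are illustrative):

    static ssize_t my_debugfs_read(struct file *file, char __user *buf,
    			       size_t count, loff_t *ppos)
    {
    	struct dentry *dentry = file->f_path.dentry;
    	ssize_t ret;
    	int err;

    	err = debugfs_file_get(dentry);	/* pin against concurrent removal */
    	if (err)
    		return err;

    	ret = simple_read_from_buffer(buf, count, ppos, "ok\n", 3);

    	debugfs_file_put(dentry);
    	return ret;
    }
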
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index d9912e0..62f4fa8 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -146,7 +146,7 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0x0C5B
+#define APPS_EVENT_LAST_ID		0xC7A
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			125
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5e204a5..2877ccb 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -380,8 +380,8 @@
 	u32 attributes;
 	u32 get_bar_attributes;
 	u32 set_bar_attributes;
-	uint64_t romsize;
-	void *romimage;
+	u64 romsize;
+	u32 romimage;
 } efi_pci_io_protocol_32;
 
 typedef struct {
@@ -400,8 +400,8 @@
 	u64 attributes;
 	u64 get_bar_attributes;
 	u64 set_bar_attributes;
-	uint64_t romsize;
-	void *romimage;
+	u64 romsize;
+	u64 romimage;
 } efi_pci_io_protocol_64;
 
 typedef struct {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8a3bdad..1c4da43 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2948,6 +2948,8 @@
 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio);
+
 extern void inode_set_flags(struct inode *inode, unsigned int flags,
 			    unsigned int mask);
 
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 9e535af..8f4e90c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -18,6 +18,14 @@
 #define FS_CRYPTO_BLOCK_SIZE		16
 
 struct fscrypt_ctx;
+
+/* IV sector for security/pfe/pfk_fscrypt.c and f2fs. sizeof is required
+ * to accommodate 32-bit targets.
+ */
+#define PG_DUN(i, p)                                            \
+	((((i)->i_ino & 0xffffffff) << (sizeof((i)->i_ino)/2)) | \
+				((p)->index & 0xffffffff))
+
 struct fscrypt_info;
 
 struct fscrypt_str {
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 6f97714..ce4df7a 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -183,6 +183,21 @@
 	return -EOPNOTSUPP;
 }
 
+/* fscrypt_ice.c */
+static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline void fscrypt_set_ice_dun(const struct inode *inode,
+		struct bio *bio, u64 dun){}
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+		sector_t iv_block, bool bio_encrypted)
+{
+	return true;
+}
+
 /* hooks.c */
 
 static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 1ed79ee..32e2b6c 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -29,6 +29,7 @@
 	bool (*dummy_context)(struct inode *);
 	bool (*empty_dir)(struct inode *);
 	unsigned (*max_namelen)(struct inode *);
+	bool (*is_encrypted)(struct inode *);
 };
 
 struct fscrypt_ctx {
@@ -195,6 +196,13 @@
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 				 unsigned int);
 
+/* fscrypt_ice.c */
+extern int fscrypt_using_hardware_encryption(const struct inode *inode);
+extern void fscrypt_set_ice_dun(const struct inode *inode,
+		struct bio *bio, u64 dun);
+extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
+
+
 /* hooks.c */
 extern int fscrypt_file_open(struct inode *inode, struct file *filp);
 extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
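
Together with the PG_DUN() macro added to fscrypt.h above, a filesystem using inline (ICE) encryption would derive the data unit number from the inode and page index and tag the bio before submission. A rough sketch, assuming the file is hardware-encrypted (the helper name is illustrative):

    static void my_submit_page_bio(struct inode *inode, struct page *page,
    			       struct bio *bio)
    {
    	/* Tell the ICE hardware which data unit this bio starts at. */
    	if (fscrypt_using_hardware_encryption(inode))
    		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));

    	submit_bio(bio);
    }
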
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f4c0d36..ab7938a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -244,8 +244,16 @@
 	return *this_cpu_ptr(ops->disabled);
 }
 
+#ifdef CONFIG_CFI_CLANG
+/* Use a C stub with the correct type for CFI */
+static inline void ftrace_stub(unsigned long a0, unsigned long a1,
+			       struct ftrace_ops *op, struct pt_regs *regs)
+{
+}
+#else
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
 			struct ftrace_ops *op, struct pt_regs *regs);
+#endif
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8feecd5..7e39719 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -600,7 +600,7 @@
  * Returns true if the skb is tagged with multiple vlan headers, regardless
  * of whether it is hardware accelerated or not.
  */
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
 
@@ -610,6 +610,9 @@
 		if (likely(!eth_type_vlan(protocol)))
 			return false;
 
+		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+			return false;
+
 		veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	}
@@ -627,7 +630,7 @@
  *
  * Returns features without unsafe ones if the skb has multiple tags.
  */
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
 						    netdev_features_t features)
 {
 	if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/input/synaptics_dsx_v2_6.h b/include/linux/input/synaptics_dsx_v2_6.h
index 5cd26bb..75d4b77 100644
--- a/include/linux/input/synaptics_dsx_v2_6.h
+++ b/include/linux/input/synaptics_dsx_v2_6.h
@@ -59,6 +59,7 @@
  * @y_flip: y flip flag
  * @swap_axes: swap axes flag
  * @resume_in_workqueue: defer resume function to workqueue
+ * @dont_disable_regs: do not disable/enable regulators in suspend/resume
  * @irq_gpio: attention interrupt GPIO
  * @irq_on_state: attention interrupt active state
  * @power_gpio: power switch GPIO
@@ -75,6 +76,7 @@
  * @power_delay_ms: delay time to wait after powering up device
  * @reset_delay_ms: delay time to wait after resetting device
  * @reset_active_ms: reset active time
+ * @bus_lpm_cur_uA: low power mode current setting for bus
  * @byte_delay_us: delay time between two bytes of SPI data
  * @block_delay_us: delay time between two SPI transfers
  * @pwr_reg_name: pointer to name of regulator for power control
@@ -88,12 +90,14 @@
 	bool swap_axes;
 	bool resume_in_workqueue;
 	bool wakeup_gesture_en;
+	bool dont_disable_regs;
 	int irq_gpio;
 	int irq_on_state;
 	int power_gpio;
 	int power_on_state;
 	int reset_gpio;
 	int reset_on_state;
+	int bus_lpm_cur_uA;
 	int max_y_for_2d;
 	unsigned long irq_flags;
 	unsigned short i2c_addr;
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 1a1c68e..2c31666 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1156,6 +1156,7 @@
  * @ipa_if_tlv: number of IPA_IF TLV
  * @ipa_if_aos: number of IPA_IF AOS
  * @ee: Execution environment
+ * @prefetch_mode: Prefetch mode to be used
  */
 struct ipa_gsi_ep_config {
 	int ipa_ep_num;
@@ -1163,6 +1164,7 @@
 	int ipa_if_tlv;
 	int ipa_if_aos;
 	int ee;
+	enum gsi_prefetch_mode prefetch_mode;
 };
 
 /**
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c122409..914eb4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -451,6 +451,8 @@
 }
 
 void gic_show_pending_irqs(void);
+void gic_v3_dist_save(void);
+void gic_v3_dist_restore(void);
 unsigned int get_gic_highpri_irq(void);
 #endif
 
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index d927622..3ffade4 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -9,6 +9,7 @@
 	KCORE_VMALLOC,
 	KCORE_RAM,
 	KCORE_VMEMMAP,
+	KCORE_USER,
 	KCORE_OTHER,
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8c58db2..eb55374 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1070,7 +1070,6 @@
 {
 }
 #endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
 
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
@@ -1079,6 +1078,8 @@
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
 	/*
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 66a1f4e..b27315c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -35,6 +35,7 @@
 	const char		*name;
 	enum led_brightness	 brightness;
 	enum led_brightness	 max_brightness;
+	enum led_brightness	 usr_brightness_req;
 	int			 flags;
 
 	/* Lower 16 bits reflect status */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index be65c03..f510c68 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1452,6 +1452,8 @@
 					size_t *len);
 	int (*inode_create)(struct inode *dir, struct dentry *dentry,
 				umode_t mode);
+	int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+				umode_t mode);
 	int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
 				struct dentry *new_dentry);
 	int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1750,6 +1752,7 @@
 	struct list_head inode_free_security;
 	struct list_head inode_init_security;
 	struct list_head inode_create;
+	struct list_head inode_post_create;
 	struct list_head inode_link;
 	struct list_head inode_unlink;
 	struct list_head inode_symlink;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 270f032..b328cca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2246,6 +2246,7 @@
 #define FOLL_MLOCK	0x1000	/* lock present pages */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
 #define FOLL_COW	0x4000	/* internal GUP flag */
+#define FOLL_ANON	0x8000	/* don't do file mappings */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e..e030a68 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,6 +50,13 @@
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
 
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
+{
+	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+	list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
 static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7228bcd..149b718 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -652,6 +652,8 @@
 	void *cmdq_private;
 	struct mmc_request	*err_mrq;
 
+	bool inlinecrypt_support;  /* Inline encryption support */
+
 	atomic_t rpmb_req_pending;
 	struct mutex		rpmb_req_mutex;
 	unsigned long		private[0] ____cacheline_aligned;
@@ -692,6 +694,7 @@
 #define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & \
 				MMC_BUSRESUME_MANUAL_RESUME)
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
 {
 	if (manual)
@@ -699,6 +702,11 @@
 	else
 		host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
 }
+#else
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+}
+#endif
 
 extern int mmc_resume_bus(struct mmc_host *host);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c28c28..815d0f4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -245,8 +245,6 @@
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 81e7e75..79e7daa 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -12,6 +12,7 @@
 #ifndef MSM_GSI_H
 #define MSM_GSI_H
 #include <linux/types.h>
+#include <linux/interrupt.h>
 
 enum gsi_ver {
 	GSI_VER_ERR = 0,
@@ -82,6 +83,9 @@
  * @irq:        IRQ number
  * @phys_addr:  physical address of GSI block
  * @size:       register size of GSI block
+ * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
+ * @emulator_intcntrlr_size: the size of the emulator's interrupt control block
+ * @emulator_intcntrlr_client_isr: the client's ISR, called by the emulator's ISR
  * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
  * @mhi_er_id_limits: MHI event ring start and end ids
  * @notify_cb:  general notification callback
@@ -107,6 +111,9 @@
 	unsigned int irq;
 	phys_addr_t phys_addr;
 	unsigned long size;
+	phys_addr_t emulator_intcntrlr_addr;
+	unsigned long emulator_intcntrlr_size;
+	irq_handler_t emulator_intcntrlr_client_isr;
 	bool mhi_er_id_limits_valid;
 	uint32_t mhi_er_id_limits[2];
 	void (*notify_cb)(struct gsi_per_notify *notify);
@@ -847,6 +854,18 @@
 		union __packed gsi_channel_scratch val);
 
 /**
+ * gsi_read_channel_scratch - Peripheral should call this function to
+ * read the scratch area of the channel context
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * @Return gsi_status
+ */
+int gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch *ch_scratch);
+
+/**
  * gsi_start_channel - Peripheral should call this function to
  * start a channel i.e put into running state
  *
@@ -1104,6 +1123,7 @@
  * gsi_alloc_channel (for as many channels as needed; channels can have
  * no event ring, an exclusive event ring or a shared event ring)
  * gsi_write_channel_scratch
+ * gsi_read_channel_scratch
  * gsi_start_channel
  * gsi_queue_xfer/gsi_start_xfer
  * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
@@ -1188,6 +1208,12 @@
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
+static inline int gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch *ch_scratch)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
 static inline int gsi_start_channel(unsigned long chan_hdl)
 {
 	return -GSI_STATUS_UNSUPPORTED_OP;
@@ -1285,7 +1311,8 @@
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
-static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
+static inline int gsi_enable_fw(
+	phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
 {
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa45..3529683 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@
 	unsigned int write_suspended:1;
 	unsigned int erase_suspended:1;
 	unsigned long in_progress_block_addr;
+	unsigned long in_progress_block_mask;
 
 	struct mutex mutex;
 	wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index e791ebc..0c5ef54 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
 #define _LINUX_NOSPEC_H
 #include <asm/barrier.h>
 
+struct task_struct;
+
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
  * @index: array element index
@@ -55,4 +57,12 @@
 									\
 	(typeof(_i)) (_i & _mask);					\
 })
+
+/* Speculation control prctl */
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+			     unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
+
 #endif /* _LINUX_NOSPEC_H */
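
These hooks back the PR_GET_SPECULATION_CTRL/PR_SET_SPECULATION_CTRL prctl() interface that the Speculative Store Bypass series introduces. From userspace, opting a task out of speculative store bypass looks roughly like the sketch below (constants come from linux/prctl.h on kernels carrying this series):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
    	/* Disable speculative store bypass for this task and its children. */
    	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
    		  PR_SPEC_DISABLE, 0, 0))
    		perror("PR_SET_SPECULATION_CTRL");

    	/* Query the current state; the return value is a PR_SPEC_* bitmask. */
    	printf("ssb state: %d\n",
    	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
    	return 0;
    }
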
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb1..e44e9a3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -39,7 +39,9 @@
 	struct property *next;
 	unsigned long _flags;
 	unsigned int unique_id;
+#if defined(CONFIG_OF_KOBJ)
 	struct bin_attribute attr;
+#endif
 };
 
 #if defined(CONFIG_SPARC)
@@ -58,7 +60,9 @@
 	struct	device_node *parent;
 	struct	device_node *child;
 	struct	device_node *sibling;
+#if defined(CONFIG_OF_KOBJ)
 	struct	kobject kobj;
+#endif
 	unsigned long _flags;
 	void	*data;
 #if defined(CONFIG_SPARC)
@@ -102,21 +106,17 @@
 extern struct kobj_type of_node_ktype;
 static inline void of_node_init(struct device_node *node)
 {
+#if defined(CONFIG_OF_KOBJ)
 	kobject_init(&node->kobj, &of_node_ktype);
+#endif
 	node->fwnode.type = FWNODE_OF;
 }
 
-/* true when node is initialized */
-static inline int of_node_is_initialized(struct device_node *node)
-{
-	return node && node->kobj.state_initialized;
-}
-
-/* true when node is attached (i.e. present on sysfs) */
-static inline int of_node_is_attached(struct device_node *node)
-{
-	return node && node->kobj.state_in_sysfs;
-}
+#if defined(CONFIG_OF_KOBJ)
+#define of_node_kobj(n) (&(n)->kobj)
+#else
+#define of_node_kobj(n) NULL
+#endif
 
 #ifdef CONFIG_OF_DYNAMIC
 extern struct device_node *of_node_get(struct device_node *node);
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..d7405ea
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+/*
+ * Default key for inline encryption.
+ *
+ * For now only AES-256-XTS is supported, so this is a fixed length.  But if
+ * ever needed, this should be made variable-length with a 'mode' and 'size'.
+ * (Remember to update pfk_allow_merge_bio() when doing so!)
+ */
+#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64
+
+struct blk_encryption_key {
+	u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS];
+};
+
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+	return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2)
+{
+	return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H_ */
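
A call site is expected to initialize *is_pfe to false and treat it staying false (which is what the CONFIG_PFK=n stubs leave it at) as "not a per-file-encrypted bio, take the ordinary path". A hedged sketch (everything outside this header is illustrative):

    static int my_configure_bio_crypto(struct bio *bio,
    				   struct ice_crypto_setting *setting)
    {
    	bool is_pfe = false;
    	int ret;

    	ret = pfk_load_key_start(bio, setting, &is_pfe, false);
    	if (!is_pfe)
    		return 0;	/* not per-file encrypted, or CONFIG_PFK=n */

    	return ret;		/* 0 on success, otherwise the PFK error */
    }
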
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 2fb9f03..9412480 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -125,6 +125,18 @@
 	POWER_SUPPLY_PL_NON_STACKED_BATFET,
 };
 
+enum {
+	POWER_SUPPLY_PD_INACTIVE = 0,
+	POWER_SUPPLY_PD_ACTIVE,
+	POWER_SUPPLY_PD_PPS_ACTIVE,
+};
+
+enum {
+	POWER_SUPPLY_QC_CTM_DISABLE = BIT(0),
+	POWER_SUPPLY_QC_THERMAL_BALANCE_DISABLE = BIT(1),
+	POWER_SUPPLY_QC_INOV_THERMAL_DISABLE = BIT(2),
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -282,6 +294,14 @@
 	POWER_SUPPLY_PROP_ALLOW_HVDCP3,
 	POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
 	POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED,
+	POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+	POWER_SUPPLY_PROP_BATTERY_INFO,
+	POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+	POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
+	POWER_SUPPLY_PROP_ESR_ACTUAL,
+	POWER_SUPPLY_PROP_ESR_NOMINAL,
+	POWER_SUPPLY_PROP_SOH,
+	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/property.h b/include/linux/property.h
index 338f9b7..459337f 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -187,7 +187,7 @@
  */
 
 #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_)	\
-{								\
+(struct property_entry) {					\
 	.name = _name_,						\
 	.length = ARRAY_SIZE(_val_) * sizeof(_type_),		\
 	.is_array = true,					\
@@ -205,7 +205,7 @@
 	PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
 
 #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_)		\
-{								\
+(struct property_entry) {					\
 	.name = _name_,						\
 	.length = ARRAY_SIZE(_val_) * sizeof(const char *),	\
 	.is_array = true,					\
@@ -214,7 +214,7 @@
 }
 
 #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_)	\
-{							\
+(struct property_entry) {				\
 	.name = _name_,					\
 	.length = sizeof(_type_),			\
 	.is_string = false,				\
@@ -231,7 +231,7 @@
 	PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
 
 #define PROPERTY_ENTRY_STRING(_name_, _val_)		\
-{							\
+(struct property_entry) {				\
 	.name = _name_,					\
 	.length = sizeof(_val_),			\
 	.is_string = true,				\
@@ -239,7 +239,7 @@
 }
 
 #define PROPERTY_ENTRY_BOOL(_name_)		\
-{						\
+(struct property_entry) {			\
 	.name = _name_,				\
 }
 
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 05c6d20..ac377a2 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -351,7 +351,7 @@
 
 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-	if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+	if (size > KMALLOC_MAX_SIZE / sizeof(void *))
 		return NULL;
 	return kcalloc(size, sizeof(void *), gfp);
 }
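
The rewritten bound check avoids an unsigned multiplication overflow: on 32-bit, a huge size could make size * sizeof(void *) wrap and slip under KMALLOC_MAX_SIZE, while dividing the constant keeps the arithmetic in range. A worked example (illustrative numbers):

    /*
     * 32-bit, sizeof(void *) == 4, size == 0x40000001:
     *   old check: 0x40000001 * 4 wraps to 4, 4 <= KMALLOC_MAX_SIZE -> accepted
     *   new check: 0x40000001 > KMALLOC_MAX_SIZE / 4                -> rejected
     */
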
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644
index 0000000..600aadf
--- /dev/null
+++ b/include/linux/refcount.h
@@ -0,0 +1,294 @@
+#ifndef _LINUX_REFCOUNT_H
+#define _LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before; it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_REFCOUNT
+#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
+#define __refcount_check	__must_check
+#else
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#endif
+
+typedef struct refcount_struct {
+	atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+	atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+	return atomic_read(&r->refs);
+}
+
+static inline __refcount_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (!val)
+			return false;
+
+		if (unlikely(val == UINT_MAX))
+			return true;
+
+		new = val + i;
+		if (new < val)
+			new = UINT_MAX;
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		new = val + 1;
+
+		if (!val)
+			return false;
+
+		if (unlikely(!new))
+			return true;
+
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline
+void refcount_dec(refcount_t *r)
+{
+	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * instead of a generic cmpxchg helper, because the latter would allow
+ * implementing unsafe operations.
+ */
+static inline __refcount_check
+bool refcount_dec_if_one(refcount_t *r)
+{
+	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+
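+/*
+ * Example of refcount_dec_if_one() as a try-delete (illustrative sketch; the
+ * obj_list_lock mutex and the struct obj fields below are hypothetical):
+ *
+ *	mutex_lock(&obj_list_lock);
+ *	if (refcount_dec_if_one(&obj->ref)) {
+ *		list_del(&obj->node);
+ *		mutex_unlock(&obj_list_lock);
+ *		kfree(obj);
+ *	} else {
+ *		mutex_unlock(&obj_list_lock);
+ *	}
+ */
+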
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+static inline __refcount_check
+bool refcount_dec_not_one(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return true;
+
+		if (val == 1)
+			return false;
+
+		new = val - 1;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return true;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return true;
+}
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	mutex_lock(lock);
+	if (!refcount_dec_and_test(r)) {
+		mutex_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
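+/*
+ * Example put() helper built on refcount_dec_and_mutex_lock() (illustrative
+ * sketch; struct obj, obj_list_lock and the node field are hypothetical):
+ *
+ *	void obj_put(struct obj *obj)
+ *	{
+ *		if (refcount_dec_and_mutex_lock(&obj->ref, &obj_list_lock)) {
+ *			list_del(&obj->node);
+ *			mutex_unlock(&obj_list_lock);
+ *			kfree(obj);
+ *		}
+ *	}
+ */
+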
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	spin_lock(lock);
+	if (!refcount_dec_and_test(r)) {
+		spin_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
+#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0748b7b..6b03b08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2629,6 +2629,8 @@
 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
 #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
+#define PFA_SPEC_SSB_DISABLE		4	/* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE	5	/* Speculative Store Bypass force disabled */
 
 
 #define TASK_PFA_TEST(name, func)					\
@@ -2655,6 +2657,13 @@
 TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
 TASK_PFA_SET(LMK_WAITING, lmk_waiting)
 
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
 /*
  * task->jobctl flags
  */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index ecc296c..50c460a 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,7 +3,8 @@
 
 #include <uapi/linux/seccomp.h>
 
-#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC	| \
+					 SECCOMP_FILTER_FLAG_SPEC_ALLOW)
 
 #ifdef CONFIG_SECCOMP
 
diff --git a/include/linux/security.h b/include/linux/security.h
index 3632428..bfb1b74 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/bio.h>
 
 struct linux_binprm;
 struct cred;
@@ -256,6 +257,8 @@
 				     const struct qstr *qstr, const char **name,
 				     void **value, size_t *len);
 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+					umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -304,6 +307,7 @@
 				 struct fown_struct *fown, int sig);
 int security_file_receive(struct file *file);
 int security_file_open(struct file *file, const struct cred *cred);
+
 int security_task_create(unsigned long clone_flags);
 void security_task_free(struct task_struct *task);
 int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -637,6 +641,13 @@
 	return 0;
 }
 
+static inline int security_inode_post_create(struct inode *dir,
+					struct dentry *dentry,
+					umode_t mode)
+{
+	return 0;
+}
+
 static inline int security_inode_link(struct dentry *old_dentry,
 				       struct inode *dir,
 				       struct dentry *new_dentry)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1a94397..42e3f06 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -348,10 +348,10 @@
 	char	name[16];
 	char	compatible[128];
 	int	(*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
 
-extern const struct earlycon_id __earlycon_table[];
-extern const struct earlycon_id __earlycon_table_end[];
+extern const struct earlycon_id *__earlycon_table[];
+extern const struct earlycon_id *__earlycon_table_end[];
 
 #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
 #define EARLYCON_USED_OR_UNUSED	__used
@@ -359,12 +359,19 @@
 #define EARLYCON_USED_OR_UNUSED	__maybe_unused
 #endif
 
-#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
-	static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name)	\
-	     EARLYCON_USED_OR_UNUSED __section(__earlycon_table)	\
+#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id)		\
+	static const struct earlycon_id unique_id			\
+	     EARLYCON_USED_OR_UNUSED __initconst			\
 		= { .name = __stringify(_name),				\
 		    .compatible = compat,				\
-		    .setup = fn  }
+		    .setup = fn  };					\
+	static const struct earlycon_id EARLYCON_USED_OR_UNUSED		\
+		__section(__earlycon_table)				\
+		* const __PASTE(__p, unique_id) = &unique_id
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
+	_OF_EARLYCON_DECLARE(_name, compat, fn,				\
+			     __UNIQUE_ID(__earlycon_##_name))
 
 #define EARLYCON_DECLARE(_name, fn)	OF_EARLYCON_DECLARE(_name, "", fn)
 
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b63f63e..5308304 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -97,6 +97,23 @@
 	}
 }
 
+static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
+{
+	switch (_NSIG_WORDS) {
+	case 4:
+		return	(set1->sig[3] == set2->sig[3]) &&
+			(set1->sig[2] == set2->sig[2]) &&
+			(set1->sig[1] == set2->sig[1]) &&
+			(set1->sig[0] == set2->sig[0]);
+	case 2:
+		return	(set1->sig[1] == set2->sig[1]) &&
+			(set1->sig[0] == set2->sig[0]);
+	case 1:
+		return	set1->sig[0] == set2->sig[0];
+	}
+	return 0;
+}
+
 #define sigmask(sig)	(1UL << ((sig) - 1))
 
 #ifndef __HAVE_ARCH_SIG_SETOPS
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 448321b..90d8569 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -378,6 +378,8 @@
 extern void swsusp_set_page_free(struct page *);
 extern void swsusp_unset_page_free(struct page *);
 extern unsigned long get_safe_page(gfp_t gfp_mask);
+extern asmlinkage int swsusp_arch_suspend(void);
+extern asmlinkage int swsusp_arch_resume(void);
 
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 00a1f33..9c452f6d 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -518,7 +518,7 @@
 }
 
 static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
-						   const unsigned char *name)
+						   const char *name)
 {
 	return kernfs_find_and_get(parent, name);
 }
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6f1ee85..fe1b862 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -657,7 +657,7 @@
 extern int tty_set_ldisc(struct tty_struct *tty, int disc);
 extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
 extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
 extern void tty_ldisc_deinit(struct tty_struct *tty);
 extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
 				 char *f, int count);
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 7f4367b..87e97bb 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -45,6 +45,9 @@
 #define FUNC_SUSPEND_OPT_SUSP_MASK BIT(0)
 #define FUNC_SUSPEND_OPT_RW_EN_MASK BIT(1)
 
+#define FUNC_WAKEUP_CAPABLE_SHIFT  0
+#define FUNC_WAKEUP_ENABLE_SHIFT   1
+
 /*
  * USB function drivers should return USB_GADGET_DELAYED_STATUS if they
  * wish to delay the data/status stages of the control transfer till they
@@ -57,6 +60,9 @@
 /* big enough to hold our biggest descriptor */
 #define USB_COMP_EP0_BUFSIZ	4096
 
+/* OS feature descriptor length <= 4kB */
+#define USB_COMP_EP0_OS_DESC_BUFSIZ	4096
+
 #define USB_MS_TO_HS_INTERVAL(x)	(ilog2((x * 1000 / 125)) + 1)
 struct usb_configuration;
 
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index c40355f..d4f1759 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -44,6 +44,33 @@
 };
 
 /**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED	USB charger is not connected or detection
+ *                              process is not yet started.
+ * USB_CHG_STATE_IN_PROGRESS	Charger detection in progress
+ * USB_CHG_STATE_WAIT_FOR_DCD	Waiting for data pin contact.
+ * USB_CHG_STATE_DCD_DONE	Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE	Primary detection is completed (distinguishes
+ *                              SDP from DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE	Secondary detection is completed (distinguishes
+ *                              DCP from CDP).
+ * USB_CHG_STATE_DETECTED	USB charger type is determined.
+ * USB_CHG_STATE_QUEUE_SM_WORK	SM work to start/stop gadget is queued.
+ *
+ */
+enum usb_chg_state {
+	USB_CHG_STATE_UNDEFINED = 0,
+	USB_CHG_STATE_IN_PROGRESS,
+	USB_CHG_STATE_WAIT_FOR_DCD,
+	USB_CHG_STATE_DCD_DONE,
+	USB_CHG_STATE_PRIMARY_DONE,
+	USB_CHG_STATE_SECONDARY_DONE,
+	USB_CHG_STATE_DETECTED,
+	USB_CHG_STATE_QUEUE_SM_WORK,
+};
+
+/**
  * USB charger types
  *
  * USB_INVALID_CHARGER	Invalid USB charger.
@@ -126,7 +153,10 @@
  * @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
  * @cur_power: The amount of mA available from downstream port.
  * @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
  * @chg_type: The type of charger attached.
+ * @chg_detection: True if PHY is doing charger type detection.
  * @bus_perf_client: Bus performance client handle to request BUS bandwidth
  * @host_bus_suspend: indicates host bus suspend or not.
  * @device_bus_suspend: indicates device bus suspend or not.
@@ -149,6 +179,7 @@
  * @max_nominal_system_clk_rate: max freq at which system clock can run in
 		nominal mode.
  * @sdp_check: SDP detection work in case of USB_FLOAT power supply
+ * @notify_charger_work: Charger notification work.
  */
 struct msm_otg {
 	struct usb_phy phy;
@@ -191,8 +222,11 @@
 	int async_int;
 	unsigned int cur_power;
 	struct workqueue_struct *otg_wq;
+	struct delayed_work chg_work;
 	struct delayed_work id_status_work;
+	enum usb_chg_state chg_state;
 	enum usb_chg_type chg_type;
+	bool chg_detection;
 	unsigned int dcd_time;
 	unsigned long caps;
 	uint32_t bus_perf_client;
@@ -278,7 +312,7 @@
 	struct pm_qos_request pm_qos_req_dma;
 	struct delayed_work perf_vote_work;
 	struct delayed_work sdp_check;
-	struct work_struct notify_chg_current_work;
+	struct work_struct notify_charger_work;
 };
 
 struct ci13xxx_platform_data {
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb547..3f8f3505 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -143,6 +143,9 @@
 int virtio_device_restore(struct virtio_device *dev);
 #endif
 
+#define virtio_device_for_each_vq(vdev, vq) \
+	list_for_each_entry(vq, &vdev->vqs, list)
+
 /**
  * virtio_driver - operations for a virtio I/O driver
  * @driver: underlying device driver (populate name and owner).
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9a8eb83..3eed4f1 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -43,7 +43,7 @@
  */
 enum wb_reason {
 	WB_REASON_BACKGROUND,
-	WB_REASON_TRY_TO_FREE_PAGES,
+	WB_REASON_VMSCAN,
 	WB_REASON_SYNC,
 	WB_REASON_PERIODIC,
 	WB_REASON_LAPTOP_TIMER,
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index 2c8b651..54cd27b 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -26,6 +26,21 @@
 	uint16_t size_down;
 };
 
+struct msm_camera_i2c_reg_setting32 {
+	compat_uptr_t reg_setting;
+	uint16_t size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t delay;
+};
+
+struct msm_sensor_id_info_t32 {
+	unsigned short sensor_id_reg_addr;
+	unsigned short sensor_id;
+	unsigned short sensor_id_mask;
+	struct msm_camera_i2c_reg_setting32 setting;
+};
+
 struct msm_camera_sensor_slave_info32 {
 	char sensor_name[32];
 	char eeprom_name[32];
@@ -36,7 +51,7 @@
 	uint16_t slave_addr;
 	enum i2c_freq_mode_t i2c_freq_mode;
 	enum msm_camera_i2c_reg_addr_type addr_type;
-	struct msm_sensor_id_info_t sensor_id_info;
+	struct msm_sensor_id_info_t32 sensor_id_info;
 	struct msm_sensor_power_setting_array32 power_setting_array;
 	uint8_t  is_init_params_valid;
 	struct msm_sensor_init_params sensor_init_params;
@@ -128,14 +143,6 @@
 	uint16_t delay;
 };
 
-struct msm_camera_i2c_reg_setting32 {
-	compat_uptr_t reg_setting;
-	uint16_t size;
-	enum msm_camera_i2c_reg_addr_type addr_type;
-	enum msm_camera_i2c_data_type data_type;
-	uint16_t delay;
-};
-
 struct msm_camera_i2c_array_write_config32 {
 	struct msm_camera_i2c_reg_setting32 conf_array;
 	uint16_t slave_addr;
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 95679cb..176dd2e 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -28,6 +28,19 @@
 		struct msm_isp_sof_info sof_info;
 	} u;
 };
+
+struct msm_isp32_event_data32 {
+	struct compat_timeval timestamp;
+	struct compat_timeval mono_timestamp;
+	enum msm_vfe_input_src input_intf;
+	uint32_t frame_id;
+	union {
+		struct msm_isp_stats_event stats;
+		struct msm_isp_buf_event buf_done;
+		struct msm_isp32_error_info error_info;
+	} u;
+};
+
 #endif
 #ifdef CONFIG_MSM_AVTIMER
 struct avtimer_fptr_t {
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f32f7ef..7734cc9 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -197,6 +197,7 @@
 	struct   slave __rcu *primary_slave;
 	struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
 	bool     force_primary;
+	u32      nest_level;
 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
 	int     (*recv_probe)(const struct sk_buff *, struct bonding *,
 			      struct slave *);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c9b3eb7..567017b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -55,6 +55,7 @@
 #define tw_family		__tw_common.skc_family
 #define tw_state		__tw_common.skc_state
 #define tw_reuse		__tw_common.skc_reuse
+#define tw_reuseport		__tw_common.skc_reuseport
 #define tw_ipv6only		__tw_common.skc_ipv6only
 #define tw_bound_dev_if		__tw_common.skc_bound_dev_if
 #define tw_node			__tw_common.skc_nulls_node
diff --git a/include/net/ip.h b/include/net/ip.h
index ad065a4..b1b5ee0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -307,6 +307,13 @@
 	return --iph->ttl;
 }
 
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+	const struct rtable *rt = (const struct rtable *)dst;
+
+	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
 static inline
 int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
 {
@@ -314,7 +321,7 @@
 
 	return  pmtudisc == IP_PMTUDISC_DO ||
 		(pmtudisc == IP_PMTUDISC_WANT &&
-		 !(dst_metric_locked(dst, RTAX_MTU)));
+		 !ip_mtu_locked(dst));
 }
 
 static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -340,7 +347,7 @@
 	struct net *net = dev_net(dst->dev);
 
 	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
-	    dst_metric_locked(dst, RTAX_MTU) ||
+	    ip_mtu_locked(dst) ||
 	    !forwarding)
 		return dst_mtu(dst);
 
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index aa75828..978387d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -57,6 +57,7 @@
 	int				fnhe_genid;
 	__be32				fnhe_daddr;
 	u32				fnhe_pmtu;
+	bool				fnhe_mtu_locked;
 	__be32				fnhe_gw;
 	unsigned long			fnhe_expires;
 	struct rtable __rcu		*fnhe_rth_input;
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2..df528a6 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,13 +97,14 @@
 
 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
 			  struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
 void llc_sk_free(struct sock *sk);
 
 void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 8fd61bc..920a771 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4091,7 +4091,7 @@
  * The TX headroom reserved by mac80211 for its own tx_status functions.
  * This is enough for the radiotap header.
  */
-#define IEEE80211_TX_STATUS_HEADROOM	14
+#define IEEE80211_TX_STATUS_HEADROOM	ALIGN(14, 4)
 
 /**
  * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3334dbf..7fc7866 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -6,7 +6,7 @@
 
 static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
 {
-	return remaining >= sizeof(*rtnh) &&
+	return remaining >= (int)sizeof(*rtnh) &&
 	       rtnh->rtnh_len >= sizeof(*rtnh) &&
 	       rtnh->rtnh_len <= remaining;
 }
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebc5a2e..f83cacc 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -78,7 +78,7 @@
 	int wiphy_idx;
 	enum nl80211_reg_initiator initiator;
 	enum nl80211_user_reg_hint_type user_reg_hint_type;
-	char alpha2[2];
+	char alpha2[3];
 	enum nl80211_dfs_regions dfs_region;
 	bool intersect;
 	bool processed;
diff --git a/include/net/route.h b/include/net/route.h
index c0874c8..2702b7a 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -63,7 +63,8 @@
 	__be32			rt_gateway;
 
 	/* Miscellaneous cached information */
-	u32			rt_pmtu;
+	u32			rt_mtu_locked:1,
+				rt_pmtu:31;
 
 	u32			rt_table_id;
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e5430d..08b1b7b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -662,6 +662,9 @@
 	/* The controller does not support WRITE SAME */
 	unsigned no_write_same:1;
 
+	/* Inline encryption support? */
+	unsigned inlinecrypt_support:1;
+
 	unsigned use_blk_mq:1;
 	unsigned use_cmd_list:1;
 
diff --git a/include/soc/qcom/camera2.h b/include/soc/qcom/camera2.h
index c529aff..5139d22 100644
--- a/include/soc/qcom/camera2.h
+++ b/include/soc/qcom/camera2.h
@@ -47,6 +47,7 @@
 	uint16_t sensor_id_reg_addr;
 	uint16_t sensor_id;
 	uint16_t sensor_id_mask;
+	struct msm_camera_i2c_reg_setting *setting;
 };
 
 struct msm_cam_clk_info {
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
index 1865e3c..dd7e186 100644
--- a/include/soc/qcom/clock-pll.h
+++ b/include/soc/qcom/clock-pll.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -174,6 +175,7 @@
 
 extern const struct clk_ops clk_ops_local_pll;
 extern const struct clk_ops clk_ops_sr2_pll;
+extern const struct clk_ops clk_ops_acpu_pll;
 extern const struct clk_ops clk_ops_variable_rate_pll;
 extern const struct clk_ops clk_ops_variable_rate_pll_hwfsm;
 
diff --git a/include/soc/qcom/pm-legacy.h b/include/soc/qcom/pm-legacy.h
new file mode 100644
index 0000000..0ae5d7b
--- /dev/null
+++ b/include/soc/qcom/pm-legacy.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2016, 2018, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <asm/smp_plat.h>
+#include <asm/barrier.h>
+#include <dt-bindings/msm/pm.h>
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+#define msm_secondary_startup secondary_startup
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+	MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+	MSM_PM_SLEEP_MODE_RETENTION,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+	MSM_PM_SLEEP_MODE_FASTPC,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+	MSM_PM_SLEEP_MODE_NR,
+	MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+	MSM_SCM_L2_ON = 0,
+	MSM_SCM_L2_OFF = 1,
+	MSM_SCM_L2_GDHS = 3,
+	MSM_SCM_L3_PC_OFF = 4,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr)  ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
+
+struct msm_pm_time_params {
+	uint32_t latency_us;
+	uint32_t sleep_us;
+	uint32_t next_event_us;
+	uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+	void __iomem *base_addr;
+	uint32_t mask;
+};
+
+struct latency_level {
+	int affinity_level;
+	int reset_level;
+	const char *level_name;
+};
+
+/**
+ * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
+ *
+ * @cpu: cpuid of the CPU going down.
+ *
+ * Returns the l2 flush flag enum that is passed down to TZ during power
+ * collapse.
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu:	CPU on which to check for the sleep mode.
+ * @mode:	Sleep Mode to check for.
+ * @idle:	Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu, unsigned int mode, bool idle);
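+
+/*
+ * Example check before entering an idle state (illustrative sketch):
+ *
+ *	if (msm_pm_sleep_mode_allow(cpu, MSM_PM_SLEEP_MODE_POWER_COLLAPSE, true))
+ *		mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+ */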
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu:	CPU on which to check for the sleep mode.
+ * @mode:	Sleep Mode to check for.
+ * @idle:	Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu, unsigned int mode, bool idle);
+
+struct msm_pm_cpr_ops {
+	void (*cpr_suspend)(void);
+	void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
+static inline void msm_arch_idle(void)
+{
+	/* memory barrier */
+	mb();
+	wfi();
+}
+
+#ifdef CONFIG_MSM_PM_LEGACY
+
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @level:	pointer to structure with below elements
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ *	latency is required.
+ *	LPM_AFF_LVL_CPU : CPU level
+ *	LPM_AFF_LVL_L2  : L2 level
+ *	LPM_AFF_LVL_CCI : CCI level
+ * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for
+ *	low power mode with control logic power collapse or
+ *	"LPM_RESET_LVL_PC" for low power mode with control and
+ *	memory logic power collapse or "LPM_RESET_LVL_RET" for
+ *	retention mode.
+ * level_name: Pointer to the cluster name for which the latency
+ *	is required or NULL if the minimum value out of all the
+ *	clusters is to be returned. For CPU level, the name of the
+ *	L2 cluster to be passed. For CCI it has no effect.
+ * @latency:	address to get the latency value.
+ *
+ * latency value will be for the particular cluster or the minimum
+ * value out of all the clusters at the particular affinity_level
+ * and reset_level.
+ *
+ * Return: 0 for success; Error number for failure.
+ */
+int lpm_get_latency(struct latency_level *level, uint32_t *latency);
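+
+/*
+ * Example query of the L2 power-collapse latency (illustrative sketch; the
+ * cluster name "l2" is a placeholder):
+ *
+ *	struct latency_level lvl = {
+ *		.affinity_level = LPM_AFF_LVL_L2,
+ *		.reset_level = LPM_RESET_LVL_PC,
+ *		.level_name = "l2",
+ *	};
+ *	uint32_t latency_us;
+ *
+ *	if (!lpm_get_latency(&lvl, &latency_us))
+ *		pr_debug("L2 PC latency: %u us\n", latency_us);
+ */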
+
+#else
+static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
+static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_pm_sleep_status_init(void) { return 0; };
+
+static inline void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+	msm_arch_idle();
+};
+
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#define msm_pm_collapse NULL
+
+static inline int lpm_get_latency(struct latency_level *level,
+						uint32_t *latency)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+void qcom_cpu_die_legacy(unsigned int cpu);
+int qcom_cpu_kill_legacy(unsigned int cpu);
+int msm_platform_secondary_init(unsigned int cpu);
+#else
+static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
+static inline void qcom_cpu_die_legacy(unsigned int cpu) {}
+static inline int qcom_cpu_kill_legacy(unsigned int cpu) { return 0; }
+#endif
+
+enum msm_pm_time_stats_id {
+	MSM_PM_STAT_REQUESTED_IDLE = 0,
+	MSM_PM_STAT_IDLE_SPIN,
+	MSM_PM_STAT_IDLE_WFI,
+	MSM_PM_STAT_RETENTION,
+	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+	MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+	MSM_PM_STAT_SUSPEND,
+	MSM_PM_STAT_FAILED_SUSPEND,
+	MSM_PM_STAT_NOT_IDLE,
+	MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+void msm_pm_l2_add_stat(uint32_t id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+		int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
+#endif
+
+void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
+extern dma_addr_t msm_pc_debug_counters_phys;
+#endif  /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index a7d4190..d249168 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
 #define QSEECOM_KEY_ID_SIZE   32
 
 #define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_APP_ALREADY_LOADED  -38   /*0xFFFFFFDA*/
 #define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
 #define QSEOS_RESULT_FAIL_KS_OP               -64
 #define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 5f496a8..d4ebb7d 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -102,16 +102,22 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
 #define early_machine_is_sdm670()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
+#define early_machine_is_sxr1130()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sxr1130")
 #define early_machine_is_qcs605()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs605")
 #define early_machine_is_sda670()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda670")
 #define early_machine_is_sdm710()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
+#define early_machine_is_sxr1120()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sxr1120")
 #define early_machine_is_msm8953()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
 #define early_machine_is_msm8937()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8937")
+#define early_machine_is_msm8917()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8917")
 #define early_machine_is_mdm9607()      \
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9607")
 #define early_machine_is_sdm450()	\
@@ -169,11 +175,14 @@
 #define early_machine_is_sdxpoorwills()	0
 #define early_machine_is_sdm845()	0
 #define early_machine_is_sdm670()	0
+#define early_machine_is_sxr1130()	0
 #define early_machine_is_qcs605()	0
 #define early_machine_is_sda670()	0
 #define early_machine_is_sdm710()	0
+#define early_machine_is_sxr1120()	0
 #define early_machine_is_msm8953()	0
 #define early_machine_is_msm8937()	0
+#define early_machine_is_msm8917()	0
 #define early_machine_is_sdm450()	0
 #define early_machine_is_sdm632()	0
 #define early_machine_is_sdm439()	0
@@ -243,14 +252,17 @@
 	SDX_CPU_SDXPOORWILLS,
 	MSM_CPU_SDM845,
 	MSM_CPU_SDM670,
+	MSM_CPU_SXR1130,
 	MSM_CPU_QCS605,
 	MSM_CPU_SDA670,
 	MSM_CPU_SDM710,
+	MSM_CPU_SXR1120,
 	MSM_CPU_8953,
 	MSM_CPU_SDM450,
 	MSM_CPU_SDM632,
 	MSM_CPU_SDA632,
 	MSM_CPU_8937,
+	MSM_CPU_8917,
 	MSM_CPU_9607,
 	MSM_CPU_SDM439,
 	MSM_CPU_SDM429,
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
index d0f76d0..a6f38cd 100644
--- a/include/soc/qcom/spm.h
+++ b/include/soc/qcom/spm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #if defined(CONFIG_MSM_SPM)
 
 int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+void msm_spm_set_rpm_hs(bool allow_rpm_hs);
 int msm_spm_probe_done(void);
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
 int msm_spm_get_vdd(unsigned int cpu);
@@ -43,6 +44,8 @@
 struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
 int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
 		unsigned int mode, bool notify_rpm);
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
 int msm_spm_device_init(void);
 bool msm_spm_is_mode_avail(unsigned int mode);
 void msm_spm_dump_regs(unsigned int cpu);
@@ -80,6 +83,8 @@
 	return -ENODEV;
 }
 
+static inline void msm_spm_set_rpm_hs(bool allow_rpm_hs) {}
+
 static inline int msm_spm_probe_done(void)
 {
 	return -ENODEV;
@@ -114,6 +119,13 @@
 {
 	return -ENODEV;
 }
+
+static inline int msm_spm_config_low_power_mode_addr(
+	struct msm_spm_device *dev, unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
 static inline struct msm_spm_device *msm_spm_get_device_by_name(
 				const char *name)
 {
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f..4142757 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
  *
  */
 
+#include <linux/nospec.h>
 #include <sound/asound.h>
 
 #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@
 
 static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
 {
-	return id->numid - kctl->id.numid;
+	unsigned int ioff = id->numid - kctl->id.numid;
+	return array_index_nospec(ioff, kctl->count);
 }
 
 static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
 {
-	return id->index - kctl->id.index;
+	unsigned int ioff = id->index - kctl->id.index;
+	return array_index_nospec(ioff, kctl->count);
 }
 
 static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h
index 400e959..fca0548 100644
--- a/include/trace/events/pdc.h
+++ b/include/trace/events/pdc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@
 	),
 
 	TP_printk("%s hwirq:%u pin:%u type:%u enable:%u",
-		__entry->func, __entry->pin, __entry->hwirq, __entry->type,
+		__entry->func, __entry->hwirq, __entry->pin, __entry->type,
 		__entry->enable)
 );
 
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6..3411da7 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -125,6 +125,20 @@
 	TP_ARGS(timer)
 );
 
+#define decode_clockid(type)						\
+	__print_symbolic(type,						\
+		{ CLOCK_REALTIME,	"CLOCK_REALTIME"	},	\
+		{ CLOCK_MONOTONIC,	"CLOCK_MONOTONIC"	},	\
+		{ CLOCK_BOOTTIME,	"CLOCK_BOOTTIME"	},	\
+		{ CLOCK_TAI,		"CLOCK_TAI"		})
+
+#define decode_hrtimer_mode(mode)					\
+	__print_symbolic(mode,						\
+		{ HRTIMER_MODE_ABS,		"ABS"		},	\
+		{ HRTIMER_MODE_REL,		"REL"		},	\
+		{ HRTIMER_MODE_ABS_PINNED,	"ABS|PINNED"	},	\
+		{ HRTIMER_MODE_REL_PINNED,	"REL|PINNED"	})
+
 /**
  * hrtimer_init - called when the hrtimer is initialized
  * @hrtimer:	pointer to struct hrtimer
@@ -151,10 +165,8 @@
 	),
 
 	TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
-		  __entry->clockid == CLOCK_REALTIME ?
-			"CLOCK_REALTIME" : "CLOCK_MONOTONIC",
-		  __entry->mode == HRTIMER_MODE_ABS ?
-			"HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+		  decode_clockid(__entry->clockid),
+		  decode_hrtimer_mode(__entry->mode))
 );
 
 /**
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index c25da0e..54c1272 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -250,6 +250,25 @@
 		__entry->sample, __entry->tmr)
 );
 
+TRACE_EVENT(pre_pc_cb,
+
+	TP_PROTO(int tzflag),
+
+	TP_ARGS(tzflag),
+
+	TP_STRUCT__entry(
+		__field(int, tzflag)
+	),
+
+	TP_fast_assign(
+		__entry->tzflag = tzflag;
+	),
+
+	TP_printk("tzflag:%d",
+		__entry->tzflag
+	)
+);
+
 #endif
 #define TRACE_INCLUDE_FILE trace_msm_low_power
 #include <trace/define_trace.h>
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2ccd9cc..7bd8783 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -31,7 +31,7 @@
 
 #define WB_WORK_REASON							\
 	EM( WB_REASON_BACKGROUND,		"background")		\
-	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
+	EM( WB_REASON_VMSCAN,			"vmscan")		\
 	EM( WB_REASON_SYNC,			"sync")			\
 	EM( WB_REASON_PERIODIC,			"periodic")		\
 	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index bce990f..d6be935 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -377,22 +377,6 @@
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
 
-TRACE_EVENT(xen_mmu_flush_tlb_all,
-	    TP_PROTO(int x),
-	    TP_ARGS(x),
-	    TP_STRUCT__entry(__array(char, x, 0)),
-	    TP_fast_assign((void)x),
-	    TP_printk("%s", "")
-	);
-
-TRACE_EVENT(xen_mmu_flush_tlb,
-	    TP_PROTO(int x),
-	    TP_ARGS(x),
-	    TP_STRUCT__entry(__array(char, x, 0)),
-	    TP_fast_assign((void)x),
-	    TP_printk("%s", "")
-	);
-
 TRACE_EVENT(xen_mmu_flush_tlb_single,
 	    TP_PROTO(unsigned long addr),
 	    TP_ARGS(addr),
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index d0341cd..4b26cba 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -180,8 +180,12 @@
  */
 struct drm_msm_gem_submit_reloc {
 	__u32 submit_offset;  /* in, offset from submit_bo */
-	__u32 or;             /* in, value OR'd with result */
-	__s32 shift;          /* in, amount of left shift (can be negative) */
+#ifdef __cplusplus
+	__u32 or_val;
+#else
+	__u32 or; /* in, value OR'd with result */
+#endif
+	__s32  shift;          /* in, amount of left shift (can be negative) */
 	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
 	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
 };
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 91a31ff..9a781f0 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
 
 struct drm_virtgpu_getparam {
 	__u64 param;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index c9248e5a..5da3634 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -472,6 +472,8 @@
 header-y += v4l2-dv-timings.h
 header-y += v4l2-mediabus.h
 header-y += v4l2-subdev.h
+header-y += msm_vidc_dec.h
+header-y += msm_vidc_enc.h
 header-y += veth.h
 header-y += vfio.h
 header-y += vhost.h
diff --git a/include/uapi/linux/bgcom_interface.h b/include/uapi/linux/bgcom_interface.h
index f18280a..1ee7b87 100644
--- a/include/uapi/linux/bgcom_interface.h
+++ b/include/uapi/linux/bgcom_interface.h
@@ -19,6 +19,7 @@
 #define BGCOM_REG_WRITE  5
 #define BGCOM_SOFT_RESET  6
 #define BGCOM_MODEM_DOWN2_BG  7
+#define BGCOM_TWM_EXIT  8
 #define EXCHANGE_CODE  'V'
 
 struct bg_ui_data {
@@ -57,6 +58,9 @@
 #define BG_SOFT_RESET \
 	_IOWR(EXCHANGE_CODE, BGCOM_SOFT_RESET, \
 	struct bg_ui_data)
+#define BG_TWM_EXIT \
+	_IOWR(EXCHANGE_CODE, BGCOM_TWM_EXIT, \
+	struct bg_ui_data)
 #define BG_MODEM_DOWN2_BG_DONE \
 	_IOWR(EXCHANGE_CODE, BGCOM_MODEM_DOWN2_BG, \
 	struct bg_ui_data)
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 12263e4..fcc0e76 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -226,7 +226,6 @@
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
-#define BLKGETSTPART _IO(0x12, 128)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
@@ -273,6 +272,9 @@
 #define FS_ENCRYPTION_MODE_AES_256_CTS		4
 #define FS_ENCRYPTION_MODE_AES_128_CBC		5
 #define FS_ENCRYPTION_MODE_AES_128_CTS		6
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8
+#define FS_ENCRYPTION_MODE_PRIVATE		127
 
 struct fscrypt_policy {
 	__u8 version;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index cae866f..6475a16 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -29,6 +29,7 @@
  */
 
 #define ETH_ALEN	6		/* Octets in one ethernet addr	 */
+#define ETH_TLEN	2		/* Octets in ethernet type field */
 #define ETH_HLEN	14		/* Total octets in header.	 */
 #define ETH_ZLEN	60		/* Min. octets in frame sans FCS */
 #define ETH_DATA_LEN	1500		/* Max. octets in payload	 */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb..05b9bb6 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -870,6 +870,7 @@
 #define KVM_CAP_S390_USER_INSTR0 130
 #define KVM_CAP_MSI_DEVID 131
 #define KVM_CAP_PPC_HTM 132
+#define KVM_CAP_S390_BPB 152
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 1c39daf..be4cb02 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -98,6 +98,8 @@
 #define IPA_IOCTL_CLEANUP                       56
 #define IPA_IOCTL_QUERY_WLAN_CLIENT             57
 #define IPA_IOCTL_GET_VLAN_MODE                 58
+#define IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING       59
+#define IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING       60
 
 /**
  * max size of the header to be inserted
@@ -512,7 +514,13 @@
 	IPA_PER_CLIENT_STATS_EVENT_MAX
 };
 
-#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX)
+enum ipa_vlan_bridge_event {
+	ADD_BRIDGE_VLAN_MAPPING = IPA_PER_CLIENT_STATS_EVENT_MAX,
+	DEL_BRIDGE_VLAN_MAPPING,
+	BRIDGE_VLAN_MAPPING_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (BRIDGE_VLAN_MAPPING_MAX)
 #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
 
 /**
@@ -1872,6 +1880,20 @@
 };
 
 /**
+ * struct ipa_ioc_bridge_vlan_mapping_info - vlan to bridge mapping info
+ * @bridge_name: bridge interface name
+ * @vlan_id: vlan ID bridge is mapped to
+ * @bridge_ipv4: bridge interface ipv4 address
+ * @subnet_mask: bridge interface subnet mask
+ */
+struct ipa_ioc_bridge_vlan_mapping_info {
+	char bridge_name[IPA_RESOURCE_NAME_MAX];
+	uint16_t vlan_id;
+	uint32_t bridge_ipv4;
+	uint32_t subnet_mask;
+};
+
+/**
  *   actual IOCTLs supported by IPA driver
  */
 #define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
@@ -2056,6 +2078,15 @@
 #define IPA_IOC_GET_VLAN_MODE _IOWR(IPA_IOC_MAGIC, \
 				IPA_IOCTL_GET_VLAN_MODE, \
 				struct ipa_ioc_get_vlan_mode *)
+
+#define IPA_IOC_ADD_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING, \
+				struct ipa_ioc_bridge_vlan_mapping_info)
+
+#define IPA_IOC_DEL_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING, \
+				struct ipa_ioc_bridge_vlan_mapping_info)
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index 1a2a7e2c..6e4a6d7 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -34,9 +34,9 @@
  * To allow proper structure padding for 64bit/32bit target
  */
 #ifdef __LP64
-#define MDP_LAYER_COMMIT_V1_PAD 2
+#define MDP_LAYER_COMMIT_V1_PAD 1
 #else
-#define MDP_LAYER_COMMIT_V1_PAD 3
+#define MDP_LAYER_COMMIT_V1_PAD 2
 #endif
 
 /*
@@ -147,6 +147,9 @@
  */
 #define MDP_COMMIT_AVR_ONE_SHOT_MODE		0x10
 
+/* Flag to update brightness when commit */
+#define MDP_COMMIT_UPDATE_BRIGHTNESS		0x40
+
 /* Flag to enable concurrent writeback for the frame */
 #define MDP_COMMIT_CWB_EN 0x800
 
@@ -485,6 +488,9 @@
 	/* FRC info per device which contains frame count and timestamp */
 	struct mdp_frc_info __user *frc_info;
 
+	/* Backlight level that would update when display commit */
+	uint32_t		bl_level;
+
 	/* 32-bits reserved value for future usage. */
 	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
 };
diff --git a/include/uapi/linux/msm_vidc_dec.h b/include/uapi/linux/msm_vidc_dec.h
new file mode 100644
index 0000000..46af82b
--- /dev/null
+++ b/include/uapi/linux/msm_vidc_dec.h
@@ -0,0 +1,629 @@
+#ifndef _UAPI_MSM_VIDC_DEC_H_
+#define _UAPI_MSM_VIDC_DEC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VDEC_S_BASE	0x40000000
+/* Success */
+#define VDEC_S_SUCCESS	(VDEC_S_BASE)
+/* General failure */
+#define VDEC_S_EFAIL	(VDEC_S_BASE + 1)
+/* Fatal irrecoverable failure. Need to tear down session. */
+#define VDEC_S_EFATAL   (VDEC_S_BASE + 2)
+/* Error detected in the passed parameters */
+#define VDEC_S_EBADPARAM	(VDEC_S_BASE + 3)
+/* Command called in invalid state. */
+#define VDEC_S_EINVALSTATE	(VDEC_S_BASE + 4)
+/* Insufficient OS resources - thread, memory etc. */
+#define VDEC_S_ENOSWRES	(VDEC_S_BASE + 5)
+/* Insufficient HW resources - core capacity maxed out. */
+#define VDEC_S_ENOHWRES	(VDEC_S_BASE + 6)
+/* Invalid command called */
+#define VDEC_S_EINVALCMD	(VDEC_S_BASE + 7)
+/* Command timeout. */
+#define VDEC_S_ETIMEOUT	(VDEC_S_BASE + 8)
+/* Pre-requirement is not met for API. */
+#define VDEC_S_ENOPREREQ	(VDEC_S_BASE + 9)
+/* Command queue is full. */
+#define VDEC_S_ECMDQFULL	(VDEC_S_BASE + 10)
+/* Command is not supported by this driver */
+#define VDEC_S_ENOTSUPP	(VDEC_S_BASE + 11)
+/* Command is not implemented by the driver. */
+#define VDEC_S_ENOTIMPL	(VDEC_S_BASE + 12)
+/* Driver is busy processing a command. */
+#define VDEC_S_BUSY	(VDEC_S_BASE + 13)
+#define VDEC_S_INPUT_BITSTREAM_ERR (VDEC_S_BASE + 14)
+
+#define VDEC_INTF_VER	1
+#define VDEC_MSG_BASE	0x0000000
+/*
+ * Codes to identify asynchronous message responses and events that the driver
+ * wants to communicate to the app.
+ */
+#define VDEC_MSG_INVALID	(VDEC_MSG_BASE + 0)
+#define VDEC_MSG_RESP_INPUT_BUFFER_DONE	(VDEC_MSG_BASE + 1)
+#define VDEC_MSG_RESP_OUTPUT_BUFFER_DONE	(VDEC_MSG_BASE + 2)
+#define VDEC_MSG_RESP_INPUT_FLUSHED	(VDEC_MSG_BASE + 3)
+#define VDEC_MSG_RESP_OUTPUT_FLUSHED	(VDEC_MSG_BASE + 4)
+#define VDEC_MSG_RESP_FLUSH_INPUT_DONE	(VDEC_MSG_BASE + 5)
+#define VDEC_MSG_RESP_FLUSH_OUTPUT_DONE	(VDEC_MSG_BASE + 6)
+#define VDEC_MSG_RESP_START_DONE	(VDEC_MSG_BASE + 7)
+#define VDEC_MSG_RESP_STOP_DONE	(VDEC_MSG_BASE + 8)
+#define VDEC_MSG_RESP_PAUSE_DONE	(VDEC_MSG_BASE + 9)
+#define VDEC_MSG_RESP_RESUME_DONE	(VDEC_MSG_BASE + 10)
+#define VDEC_MSG_RESP_RESOURCE_LOADED	(VDEC_MSG_BASE + 11)
+#define VDEC_EVT_RESOURCES_LOST	(VDEC_MSG_BASE + 12)
+#define VDEC_MSG_EVT_CONFIG_CHANGED	(VDEC_MSG_BASE + 13)
+#define VDEC_MSG_EVT_HW_ERROR	(VDEC_MSG_BASE + 14)
+#define VDEC_MSG_EVT_INFO_CONFIG_CHANGED	(VDEC_MSG_BASE + 15)
+#define VDEC_MSG_EVT_INFO_FIELD_DROPPED	(VDEC_MSG_BASE + 16)
+#define VDEC_MSG_EVT_HW_OVERLOAD	(VDEC_MSG_BASE + 17)
+#define VDEC_MSG_EVT_MAX_CLIENTS	(VDEC_MSG_BASE + 18)
+#define VDEC_MSG_EVT_HW_UNSUPPORTED	(VDEC_MSG_BASE + 19)
+
+/*Buffer flags bits masks.*/
+#define VDEC_BUFFERFLAG_EOS	0x00000001
+#define VDEC_BUFFERFLAG_DECODEONLY	0x00000004
+#define VDEC_BUFFERFLAG_DATACORRUPT	0x00000008
+#define VDEC_BUFFERFLAG_ENDOFFRAME	0x00000010
+#define VDEC_BUFFERFLAG_SYNCFRAME	0x00000020
+#define VDEC_BUFFERFLAG_EXTRADATA	0x00000040
+#define VDEC_BUFFERFLAG_CODECCONFIG	0x00000080
+
+/*Post processing flags bit masks*/
+#define VDEC_EXTRADATA_NONE 0x001
+#define VDEC_EXTRADATA_QP 0x004
+#define VDEC_EXTRADATA_MB_ERROR_MAP 0x008
+#define VDEC_EXTRADATA_SEI 0x010
+#define VDEC_EXTRADATA_VUI 0x020
+#define VDEC_EXTRADATA_VC1 0x040
+
+#define VDEC_EXTRADATA_EXT_DATA          0x0800
+#define VDEC_EXTRADATA_USER_DATA         0x1000
+#define VDEC_EXTRADATA_EXT_BUFFER        0x2000
+
+#define VDEC_CMDBASE	0x800
+#define VDEC_CMD_SET_INTF_VERSION	(VDEC_CMDBASE)
+
+#define VDEC_IOCTL_MAGIC 'v'
+
+struct vdec_ioctl_msg {
+	void __user *in;
+	void __user *out;
+};
+
+/*
+ * CMD params: InputParam:enum vdec_codec
+ * OutputParam: struct vdec_profile_level
+ */
+#define VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED \
+	_IOWR(VDEC_IOCTL_MAGIC, 0, struct vdec_ioctl_msg)
+
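+/*
+ * Example of issuing one of these ioctls from userspace (illustrative sketch;
+ * fd is an open handle on the decoder device node):
+ *
+ *	enum vdec_codec codec = VDEC_CODECTYPE_H264;
+ *	struct vdec_profile_level plevel;
+ *	struct vdec_ioctl_msg msg = { .in = &codec, .out = &plevel };
+ *
+ *	if (ioctl(fd, VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED, &msg) < 0)
+ *		perror("VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED");
+ */
+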
+/*
+ * CMD params:InputParam: NULL
+ * OutputParam: uint32_t(bitmask)
+ */
+#define VDEC_IOCTL_GET_INTERLACE_FORMAT \
+	_IOR(VDEC_IOCTL_MAGIC, 1, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: InputParam:  enum vdec_codec
+ * OutputParam: struct vdec_profile_level
+ */
+#define VDEC_IOCTL_GET_CURRENT_PROFILE_LEVEL \
+	_IOWR(VDEC_IOCTL_MAGIC, 2, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: enum vdec_output_fromat  OutputParam: NULL
+ * GET:  InputParam: NULL OutputParam: enum vdec_output_fromat
+ */
+#define VDEC_IOCTL_SET_OUTPUT_FORMAT \
+	_IOWR(VDEC_IOCTL_MAGIC, 3, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_OUTPUT_FORMAT \
+	_IOWR(VDEC_IOCTL_MAGIC, 4, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: enum vdec_codec OutputParam: NULL
+ * GET: InputParam: NULL OutputParam: enum vdec_codec
+ */
+#define VDEC_IOCTL_SET_CODEC \
+	_IOW(VDEC_IOCTL_MAGIC, 5, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_CODEC \
+	_IOR(VDEC_IOCTL_MAGIC, 6, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: struct vdec_picsize outputparam: NULL
+ * GET: InputParam: NULL outputparam: struct vdec_picsize
+ */
+#define VDEC_IOCTL_SET_PICRES \
+	_IOW(VDEC_IOCTL_MAGIC, 7, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_PICRES \
+	_IOR(VDEC_IOCTL_MAGIC, 8, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_EXTRADATA \
+	_IOW(VDEC_IOCTL_MAGIC, 9, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_EXTRADATA \
+	_IOR(VDEC_IOCTL_MAGIC, 10, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_SEQUENCE_HEADER \
+	_IOW(VDEC_IOCTL_MAGIC, 11, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam - vdec_allocatorproperty, OutputParam - NULL
+ * GET: InputParam - NULL, OutputParam - vdec_allocatorproperty
+ */
+#define VDEC_IOCTL_SET_BUFFER_REQ \
+	_IOW(VDEC_IOCTL_MAGIC, 12, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_BUFFER_REQ \
+	_IOR(VDEC_IOCTL_MAGIC, 13, struct vdec_ioctl_msg)
+/* CMD params: InputParam - vdec_buffer, OutputParam - uint8_t** */
+#define VDEC_IOCTL_ALLOCATE_BUFFER \
+	_IOWR(VDEC_IOCTL_MAGIC, 14, struct vdec_ioctl_msg)
+/* CMD params: InputParam - uint8_t *, OutputParam - NULL.*/
+#define VDEC_IOCTL_FREE_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 15, struct vdec_ioctl_msg)
+
+/*CMD params: CMD: InputParam - struct vdec_setbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_SET_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 16, struct vdec_ioctl_msg)
+
+/* CMD params: InputParam - struct vdec_fillbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_FILL_OUTPUT_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 17, struct vdec_ioctl_msg)
+
+/*CMD params: InputParam - struct vdec_frameinfo , OutputParam - NULL*/
+#define VDEC_IOCTL_DECODE_FRAME \
+	_IOW(VDEC_IOCTL_MAGIC, 18, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_LOAD_RESOURCES _IO(VDEC_IOCTL_MAGIC, 19)
+#define VDEC_IOCTL_CMD_START _IO(VDEC_IOCTL_MAGIC, 20)
+#define VDEC_IOCTL_CMD_STOP _IO(VDEC_IOCTL_MAGIC, 21)
+#define VDEC_IOCTL_CMD_PAUSE _IO(VDEC_IOCTL_MAGIC, 22)
+#define VDEC_IOCTL_CMD_RESUME _IO(VDEC_IOCTL_MAGIC, 23)
+
+/*CMD params: InputParam - enum vdec_bufferflush , OutputParam - NULL */
+#define VDEC_IOCTL_CMD_FLUSH _IOW(VDEC_IOCTL_MAGIC, 24, struct vdec_ioctl_msg)
+
+/* ========================================================
+ * IOCTL for getting asynchronous notification from driver
+ * ========================================================
+ */
+
+/*IOCTL params: InputParam - NULL, OutputParam - struct vdec_msginfo*/
+#define VDEC_IOCTL_GET_NEXT_MSG \
+	_IOR(VDEC_IOCTL_MAGIC, 25, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_STOP_NEXT_MSG _IO(VDEC_IOCTL_MAGIC, 26)
+
+#define VDEC_IOCTL_GET_NUMBER_INSTANCES \
+	_IOR(VDEC_IOCTL_MAGIC, 27, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PICTURE_ORDER \
+	_IOW(VDEC_IOCTL_MAGIC, 28, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_FRAME_RATE \
+	_IOW(VDEC_IOCTL_MAGIC, 29, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_H264_MV_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 30, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_H264_MV_BUFFER \
+	_IOW(VDEC_IOCTL_MAGIC, 31, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_MV_BUFFER_SIZE  \
+	_IOR(VDEC_IOCTL_MAGIC, 32, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_IDR_ONLY_DECODING \
+	_IO(VDEC_IOCTL_MAGIC, 33)
+
+#define VDEC_IOCTL_SET_CONT_ON_RECONFIG  \
+	_IO(VDEC_IOCTL_MAGIC, 34)
+
+#define VDEC_IOCTL_SET_DISABLE_DMX \
+	_IOW(VDEC_IOCTL_MAGIC, 35, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX \
+	_IOR(VDEC_IOCTL_MAGIC, 36, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX_SUPPORT \
+	_IOR(VDEC_IOCTL_MAGIC, 37, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PERF_CLK \
+	_IOR(VDEC_IOCTL_MAGIC, 38, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_META_BUFFERS \
+	_IOW(VDEC_IOCTL_MAGIC, 39, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_META_BUFFERS \
+	_IO(VDEC_IOCTL_MAGIC, 40)
+
+enum vdec_picture {
+	PICTURE_TYPE_I,
+	PICTURE_TYPE_P,
+	PICTURE_TYPE_B,
+	PICTURE_TYPE_BI,
+	PICTURE_TYPE_SKIP,
+	PICTURE_TYPE_IDR,
+	PICTURE_TYPE_UNKNOWN
+};
+
+enum vdec_buffer {
+	VDEC_BUFFER_TYPE_INPUT,
+	VDEC_BUFFER_TYPE_OUTPUT
+};
+
+struct vdec_allocatorproperty {
+	enum vdec_buffer buffer_type;
+	uint32_t mincount;
+	uint32_t maxcount;
+	uint32_t actualcount;
+	size_t buffer_size;
+	uint32_t alignment;
+	uint32_t buf_poolid;
+	size_t meta_buffer_size;
+};
+
+struct vdec_bufferpayload {
+	void __user *bufferaddr;
+	size_t buffer_len;
+	int pmem_fd;
+	size_t offset;
+	size_t mmaped_size;
+};
+
+struct vdec_setbuffer_cmd {
+	enum vdec_buffer buffer_type;
+	struct vdec_bufferpayload buffer;
+};
+
+struct vdec_fillbuffer_cmd {
+	struct vdec_bufferpayload buffer;
+	void *client_data;
+};
+
+enum vdec_bufferflush {
+	VDEC_FLUSH_TYPE_INPUT,
+	VDEC_FLUSH_TYPE_OUTPUT,
+	VDEC_FLUSH_TYPE_ALL
+};
+
+enum vdec_codec {
+	VDEC_CODECTYPE_H264 = 0x1,
+	VDEC_CODECTYPE_H263 = 0x2,
+	VDEC_CODECTYPE_MPEG4 = 0x3,
+	VDEC_CODECTYPE_DIVX_3 = 0x4,
+	VDEC_CODECTYPE_DIVX_4 = 0x5,
+	VDEC_CODECTYPE_DIVX_5 = 0x6,
+	VDEC_CODECTYPE_DIVX_6 = 0x7,
+	VDEC_CODECTYPE_XVID = 0x8,
+	VDEC_CODECTYPE_MPEG1 = 0x9,
+	VDEC_CODECTYPE_MPEG2 = 0xa,
+	VDEC_CODECTYPE_VC1 = 0xb,
+	VDEC_CODECTYPE_VC1_RCV = 0xc,
+	VDEC_CODECTYPE_HEVC = 0xd,
+	VDEC_CODECTYPE_MVC = 0xe,
+	VDEC_CODECTYPE_VP8 = 0xf,
+	VDEC_CODECTYPE_VP9 = 0x10,
+};
+
+enum vdec_mpeg2_profile {
+	VDEC_MPEG2ProfileSimple = 0x1,
+	VDEC_MPEG2ProfileMain = 0x2,
+	VDEC_MPEG2Profile422 = 0x4,
+	VDEC_MPEG2ProfileSNR = 0x8,
+	VDEC_MPEG2ProfileSpatial = 0x10,
+	VDEC_MPEG2ProfileHigh = 0x20,
+	VDEC_MPEG2ProfileKhronosExtensions = 0x6F000000,
+	VDEC_MPEG2ProfileVendorStartUnused = 0x7F000000,
+	VDEC_MPEG2ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg2_level {
+
+	VDEC_MPEG2LevelLL = 0x1,
+	VDEC_MPEG2LevelML = 0x2,
+	VDEC_MPEG2LevelH14 = 0x4,
+	VDEC_MPEG2LevelHL = 0x8,
+	VDEC_MPEG2LevelKhronosExtensions = 0x6F000000,
+	VDEC_MPEG2LevelVendorStartUnused = 0x7F000000,
+	VDEC_MPEG2LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_profile {
+	VDEC_MPEG4ProfileSimple = 0x01,
+	VDEC_MPEG4ProfileSimpleScalable = 0x02,
+	VDEC_MPEG4ProfileCore = 0x04,
+	VDEC_MPEG4ProfileMain = 0x08,
+	VDEC_MPEG4ProfileNbit = 0x10,
+	VDEC_MPEG4ProfileScalableTexture = 0x20,
+	VDEC_MPEG4ProfileSimpleFace = 0x40,
+	VDEC_MPEG4ProfileSimpleFBA = 0x80,
+	VDEC_MPEG4ProfileBasicAnimated = 0x100,
+	VDEC_MPEG4ProfileHybrid = 0x200,
+	VDEC_MPEG4ProfileAdvancedRealTime = 0x400,
+	VDEC_MPEG4ProfileCoreScalable = 0x800,
+	VDEC_MPEG4ProfileAdvancedCoding = 0x1000,
+	VDEC_MPEG4ProfileAdvancedCore = 0x2000,
+	VDEC_MPEG4ProfileAdvancedScalable = 0x4000,
+	VDEC_MPEG4ProfileAdvancedSimple = 0x8000,
+	VDEC_MPEG4ProfileKhronosExtensions = 0x6F000000,
+	VDEC_MPEG4ProfileVendorStartUnused = 0x7F000000,
+	VDEC_MPEG4ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_level {
+	VDEC_MPEG4Level0 = 0x01,
+	VDEC_MPEG4Level0b = 0x02,
+	VDEC_MPEG4Level1 = 0x04,
+	VDEC_MPEG4Level2 = 0x08,
+	VDEC_MPEG4Level3 = 0x10,
+	VDEC_MPEG4Level4 = 0x20,
+	VDEC_MPEG4Level4a = 0x40,
+	VDEC_MPEG4Level5 = 0x80,
+	VDEC_MPEG4LevelKhronosExtensions = 0x6F000000,
+	VDEC_MPEG4LevelVendorStartUnused = 0x7F000000,
+	VDEC_MPEG4LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_profile {
+	VDEC_AVCProfileBaseline = 0x01,
+	VDEC_AVCProfileMain = 0x02,
+	VDEC_AVCProfileExtended = 0x04,
+	VDEC_AVCProfileHigh = 0x08,
+	VDEC_AVCProfileHigh10 = 0x10,
+	VDEC_AVCProfileHigh422 = 0x20,
+	VDEC_AVCProfileHigh444 = 0x40,
+	VDEC_AVCProfileKhronosExtensions = 0x6F000000,
+	VDEC_AVCProfileVendorStartUnused = 0x7F000000,
+	VDEC_AVCProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_level {
+	VDEC_AVCLevel1 = 0x01,
+	VDEC_AVCLevel1b = 0x02,
+	VDEC_AVCLevel11 = 0x04,
+	VDEC_AVCLevel12 = 0x08,
+	VDEC_AVCLevel13 = 0x10,
+	VDEC_AVCLevel2 = 0x20,
+	VDEC_AVCLevel21 = 0x40,
+	VDEC_AVCLevel22 = 0x80,
+	VDEC_AVCLevel3 = 0x100,
+	VDEC_AVCLevel31 = 0x200,
+	VDEC_AVCLevel32 = 0x400,
+	VDEC_AVCLevel4 = 0x800,
+	VDEC_AVCLevel41 = 0x1000,
+	VDEC_AVCLevel42 = 0x2000,
+	VDEC_AVCLevel5 = 0x4000,
+	VDEC_AVCLevel51 = 0x8000,
+	VDEC_AVCLevelKhronosExtensions = 0x6F000000,
+	VDEC_AVCLevelVendorStartUnused = 0x7F000000,
+	VDEC_AVCLevelMax = 0x7FFFFFFF
+};
+
+enum vdec_divx_profile {
+	VDEC_DIVXProfile_qMobile = 0x01,
+	VDEC_DIVXProfile_Mobile = 0x02,
+	VDEC_DIVXProfile_HD = 0x04,
+	VDEC_DIVXProfile_Handheld = 0x08,
+	VDEC_DIVXProfile_Portable = 0x10,
+	VDEC_DIVXProfile_HomeTheater = 0x20
+};
+
+enum vdec_xvid_profile {
+	VDEC_XVIDProfile_Simple = 0x1,
+	VDEC_XVIDProfile_Advanced_Realtime_Simple = 0x2,
+	VDEC_XVIDProfile_Advanced_Simple = 0x4
+};
+
+enum vdec_xvid_level {
+	VDEC_XVID_LEVEL_S_L0 = 0x1,
+	VDEC_XVID_LEVEL_S_L1 = 0x2,
+	VDEC_XVID_LEVEL_S_L2 = 0x4,
+	VDEC_XVID_LEVEL_S_L3 = 0x8,
+	VDEC_XVID_LEVEL_ARTS_L1 = 0x10,
+	VDEC_XVID_LEVEL_ARTS_L2 = 0x20,
+	VDEC_XVID_LEVEL_ARTS_L3 = 0x40,
+	VDEC_XVID_LEVEL_ARTS_L4 = 0x80,
+	VDEC_XVID_LEVEL_AS_L0 = 0x100,
+	VDEC_XVID_LEVEL_AS_L1 = 0x200,
+	VDEC_XVID_LEVEL_AS_L2 = 0x400,
+	VDEC_XVID_LEVEL_AS_L3 = 0x800,
+	VDEC_XVID_LEVEL_AS_L4 = 0x1000
+};
+
+enum vdec_h263profile {
+	VDEC_H263ProfileBaseline = 0x01,
+	VDEC_H263ProfileH320Coding = 0x02,
+	VDEC_H263ProfileBackwardCompatible = 0x04,
+	VDEC_H263ProfileISWV2 = 0x08,
+	VDEC_H263ProfileISWV3 = 0x10,
+	VDEC_H263ProfileHighCompression = 0x20,
+	VDEC_H263ProfileInternet = 0x40,
+	VDEC_H263ProfileInterlace = 0x80,
+	VDEC_H263ProfileHighLatency = 0x100,
+	VDEC_H263ProfileKhronosExtensions = 0x6F000000,
+	VDEC_H263ProfileVendorStartUnused = 0x7F000000,
+	VDEC_H263ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_h263level {
+	VDEC_H263Level10 = 0x01,
+	VDEC_H263Level20 = 0x02,
+	VDEC_H263Level30 = 0x04,
+	VDEC_H263Level40 = 0x08,
+	VDEC_H263Level45 = 0x10,
+	VDEC_H263Level50 = 0x20,
+	VDEC_H263Level60 = 0x40,
+	VDEC_H263Level70 = 0x80,
+	VDEC_H263LevelKhronosExtensions = 0x6F000000,
+	VDEC_H263LevelVendorStartUnused = 0x7F000000,
+	VDEC_H263LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_wmv_format {
+	VDEC_WMVFormatUnused = 0x01,
+	VDEC_WMVFormat7 = 0x02,
+	VDEC_WMVFormat8 = 0x04,
+	VDEC_WMVFormat9 = 0x08,
+	VDEC_WMFFormatKhronosExtensions = 0x6F000000,
+	VDEC_WMFFormatVendorStartUnused = 0x7F000000,
+	VDEC_WMVFormatMax = 0x7FFFFFFF
+};
+
+enum vdec_vc1_profile {
+	VDEC_VC1ProfileSimple = 0x1,
+	VDEC_VC1ProfileMain = 0x2,
+	VDEC_VC1ProfileAdvanced = 0x4
+};
+
+enum vdec_vc1_level {
+	VDEC_VC1_LEVEL_S_Low = 0x1,
+	VDEC_VC1_LEVEL_S_Medium = 0x2,
+	VDEC_VC1_LEVEL_M_Low = 0x4,
+	VDEC_VC1_LEVEL_M_Medium = 0x8,
+	VDEC_VC1_LEVEL_M_High = 0x10,
+	VDEC_VC1_LEVEL_A_L0 = 0x20,
+	VDEC_VC1_LEVEL_A_L1 = 0x40,
+	VDEC_VC1_LEVEL_A_L2 = 0x80,
+	VDEC_VC1_LEVEL_A_L3 = 0x100,
+	VDEC_VC1_LEVEL_A_L4 = 0x200
+};
+
+struct vdec_profile_level {
+	uint32_t profiles;
+	uint32_t levels;
+};
+
+enum vdec_interlaced_format {
+	VDEC_InterlaceFrameProgressive = 0x1,
+	VDEC_InterlaceInterleaveFrameTopFieldFirst = 0x2,
+	VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4
+};
+
+#define VDEC_YUV_FORMAT_NV12_TP10_UBWC \
+	VDEC_YUV_FORMAT_NV12_TP10_UBWC
+
+enum vdec_output_fromat {
+	VDEC_YUV_FORMAT_NV12 = 0x1,
+	VDEC_YUV_FORMAT_TILE_4x2 = 0x2,
+	VDEC_YUV_FORMAT_NV12_UBWC = 0x3,
+	VDEC_YUV_FORMAT_NV12_TP10_UBWC = 0x4
+};
+
+enum vdec_output_order {
+	VDEC_ORDER_DISPLAY = 0x1,
+	VDEC_ORDER_DECODE = 0x2
+};
+
+struct vdec_picsize {
+	uint32_t frame_width;
+	uint32_t frame_height;
+	uint32_t stride;
+	uint32_t scan_lines;
+};
+
+struct vdec_seqheader {
+	void __user *ptr_seqheader;
+	size_t seq_header_len;
+	int pmem_fd;
+	size_t pmem_offset;
+};
+
+struct vdec_mberror {
+	void __user *ptr_errormap;
+	size_t err_mapsize;
+};
+
+struct vdec_input_frameinfo {
+	void __user *bufferaddr;
+	size_t offset;
+	size_t datalen;
+	uint32_t flags;
+	int64_t timestamp;
+	void *client_data;
+	int pmem_fd;
+	size_t pmem_offset;
+	void __user *desc_addr;
+	uint32_t desc_size;
+};
+
+struct vdec_framesize {
+	uint32_t   left;
+	uint32_t   top;
+	uint32_t   right;
+	uint32_t   bottom;
+};
+
+struct vdec_aspectratioinfo {
+	uint32_t aspect_ratio;
+	uint32_t par_width;
+	uint32_t par_height;
+};
+
+struct vdec_sep_metadatainfo {
+	void __user *metabufaddr;
+	uint32_t size;
+	int fd;
+	int offset;
+	uint32_t buffer_size;
+};
+
+struct vdec_output_frameinfo {
+	void __user *bufferaddr;
+	size_t offset;
+	size_t len;
+	uint32_t flags;
+	int64_t time_stamp;
+	enum vdec_picture pic_type;
+	void *client_data;
+	void *input_frame_clientdata;
+	struct vdec_picsize picsize;
+	struct vdec_framesize framesize;
+	enum vdec_interlaced_format interlaced_format;
+	struct vdec_aspectratioinfo aspect_ratio_info;
+	struct vdec_sep_metadatainfo metadata_info;
+};
+
+union vdec_msgdata {
+	struct vdec_output_frameinfo output_frame;
+	void *input_frame_clientdata;
+};
+
+struct vdec_msginfo {
+	uint32_t status_code;
+	uint32_t msgcode;
+	union vdec_msgdata msgdata;
+	size_t msgdatasize;
+};
+
+struct vdec_framerate {
+	unsigned long fps_denominator;
+	unsigned long fps_numerator;
+};
+
+struct vdec_h264_mv {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int offset;
+};
+
+struct vdec_mv_buff_size {
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+struct vdec_meta_buffers {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int pmem_fd_iommu;
+	int offset;
+};
+
+#endif /* end of macro _VDECDECODER_H_ */
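
All of the VDEC ioctls above funnel their payloads through struct vdec_ioctl_msg (defined earlier in this header), whose in/out pointers carry the per-ioctl parameters named in the comments. As a hedged sketch of the asynchronous message path, assuming a headers_install copy of this file and leaving the device open/close out of scope:

#include <stdio.h>
#include <sys/ioctl.h>
#include "msm_vidc_dec.h"	/* assumed install name of the decoder header added above */

/* Block until the driver posts one decoder event, then print its codes. */
static int vdec_read_one_msg(int fd)
{
	struct vdec_msginfo info = {0};
	struct vdec_ioctl_msg msg = {
		.in  = NULL,	/* InputParam - NULL */
		.out = &info,	/* OutputParam - struct vdec_msginfo */
	};

	if (ioctl(fd, VDEC_IOCTL_GET_NEXT_MSG, &msg) < 0) {
		perror("VDEC_IOCTL_GET_NEXT_MSG");
		return -1;
	}
	printf("status=0x%x msg=0x%x\n", info.status_code, info.msgcode);
	return 0;
}

A teardown path would typically issue VDEC_IOCTL_STOP_NEXT_MSG from another thread to unblock a reader stuck in this call.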
diff --git a/include/uapi/linux/msm_vidc_enc.h b/include/uapi/linux/msm_vidc_enc.h
new file mode 100644
index 0000000..f4f1630
--- /dev/null
+++ b/include/uapi/linux/msm_vidc_enc.h
@@ -0,0 +1,752 @@
+#ifndef _UAPI_MSM_VIDC_ENC_H_
+#define _UAPI_MSM_VIDC_ENC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VEN_S_BASE	0x00000000
+#define VEN_S_SUCCESS	(VEN_S_BASE)/* Success */
+#define VEN_S_EFAIL	(VEN_S_BASE+1)/* General failure */
+#define VEN_S_EFATAL	(VEN_S_BASE+2)/* Fatal irrecoverable failure*/
+#define VEN_S_EBADPARAM	(VEN_S_BASE+3)/* Erroneous parameters passed*/
+/*Command called in invalid state*/
+#define VEN_S_EINVALSTATE	(VEN_S_BASE+4)
+#define VEN_S_ENOSWRES	(VEN_S_BASE+5)/* Insufficient OS resources*/
+#define VEN_S_ENOHWRES	(VEN_S_BASE+6)/*Insufficient HW resources */
+#define VEN_S_EBUFFREQ	(VEN_S_BASE+7)/* Buffer requirements were not met*/
+#define VEN_S_EINVALCMD	(VEN_S_BASE+8)/* Invalid command called */
+#define VEN_S_ETIMEOUT	(VEN_S_BASE+9)/* Command timeout. */
+/*Re-attempt was made on an API that does not support multiple invocations.*/
+#define VEN_S_ENOREATMPT	(VEN_S_BASE+10)
+#define VEN_S_ENOPREREQ	(VEN_S_BASE+11)/*Prerequisite for the API is not met*/
+#define VEN_S_ECMDQFULL	(VEN_S_BASE+12)/*Command queue is full*/
+#define VEN_S_ENOTSUPP	(VEN_S_BASE+13)/*Command not supported*/
+#define VEN_S_ENOTIMPL	(VEN_S_BASE+14)/*Command not implemented.*/
+#define VEN_S_ENOTPMEM	(VEN_S_BASE+15)/*Buffer is not from PMEM*/
+#define VEN_S_EFLUSHED	(VEN_S_BASE+16)/*returned buffer was flushed*/
+#define VEN_S_EINSUFBUF	(VEN_S_BASE+17)/*provided buffer size insufficient*/
+#define VEN_S_ESAMESTATE	(VEN_S_BASE+18)
+#define VEN_S_EINVALTRANS	(VEN_S_BASE+19)
+
+#define VEN_INTF_VER			 1
+
+/*Asynchronous messages from driver*/
+#define VEN_MSG_INDICATION	0
+#define VEN_MSG_INPUT_BUFFER_DONE	1
+#define VEN_MSG_OUTPUT_BUFFER_DONE	2
+#define VEN_MSG_NEED_OUTPUT_BUFFER	3
+#define VEN_MSG_FLUSH_INPUT_DONE	4
+#define VEN_MSG_FLUSH_OUTPUT_DONE	5
+#define VEN_MSG_START	6
+#define VEN_MSG_STOP	7
+#define VEN_MSG_PAUSE	8
+#define VEN_MSG_RESUME	9
+#define VEN_MSG_STOP_READING_MSG	10
+#define VEN_MSG_LTRUSE_FAILED	    11
+#define VEN_MSG_HW_OVERLOAD	12
+#define VEN_MSG_MAX_CLIENTS	13
+
+
+/*Buffer flags bits masks*/
+#define VEN_BUFFLAG_EOS	0x00000001
+#define VEN_BUFFLAG_ENDOFFRAME	0x00000010
+#define VEN_BUFFLAG_SYNCFRAME	0x00000020
+#define VEN_BUFFLAG_EXTRADATA	0x00000040
+#define VEN_BUFFLAG_CODECCONFIG	0x00000080
+
+/*Post processing flags bit masks*/
+#define VEN_EXTRADATA_NONE          0x001
+#define VEN_EXTRADATA_QCOMFILLER    0x002
+#define VEN_EXTRADATA_SLICEINFO     0x100
+#define VEN_EXTRADATA_LTRINFO       0x200
+#define VEN_EXTRADATA_MBINFO        0x400
+
+/*ENCODER CONFIGURATION CONSTANTS*/
+
+/*Encoded video frame types*/
+#define VEN_FRAME_TYPE_I	1/* I frame type */
+#define VEN_FRAME_TYPE_P	2/* P frame type */
+#define VEN_FRAME_TYPE_B	3/* B frame type */
+
+/*Video codec types*/
+#define VEN_CODEC_MPEG4	1/* MPEG4 Codec */
+#define VEN_CODEC_H264	2/* H.264 Codec */
+#define VEN_CODEC_H263	3/* H.263 Codec */
+
+/*Video codec profile types.*/
+#define VEN_PROFILE_MPEG4_SP      1/* 1 - MPEG4 SP profile      */
+#define VEN_PROFILE_MPEG4_ASP     2/* 2 - MPEG4 ASP profile     */
+#define VEN_PROFILE_H264_BASELINE 3/* 3 - H264 Baseline profile	*/
+#define VEN_PROFILE_H264_MAIN     4/* 4 - H264 Main profile     */
+#define VEN_PROFILE_H264_HIGH     5/* 5 - H264 High profile     */
+#define VEN_PROFILE_H263_BASELINE 6/* 6 - H263 Baseline profile */
+
+/*Video codec profile level types.*/
+#define VEN_LEVEL_MPEG4_0	 0x1/* MPEG4 Level 0  */
+#define VEN_LEVEL_MPEG4_1	 0x2/* MPEG4 Level 1  */
+#define VEN_LEVEL_MPEG4_2	 0x3/* MPEG4 Level 2  */
+#define VEN_LEVEL_MPEG4_3	 0x4/* MPEG4 Level 3  */
+#define VEN_LEVEL_MPEG4_4	 0x5/* MPEG4 Level 4  */
+#define VEN_LEVEL_MPEG4_5	 0x6/* MPEG4 Level 5  */
+#define VEN_LEVEL_MPEG4_3b	 0x7/* MPEG4 Level 3b */
+#define VEN_LEVEL_MPEG4_6	 0x8/* MPEG4 Level 6  */
+
+#define VEN_LEVEL_H264_1	 0x9/* H.264 Level 1   */
+#define VEN_LEVEL_H264_1b        0xA/* H.264 Level 1b  */
+#define VEN_LEVEL_H264_1p1	 0xB/* H.264 Level 1.1 */
+#define VEN_LEVEL_H264_1p2	 0xC/* H.264 Level 1.2 */
+#define VEN_LEVEL_H264_1p3	 0xD/* H.264 Level 1.3 */
+#define VEN_LEVEL_H264_2	 0xE/* H.264 Level 2   */
+#define VEN_LEVEL_H264_2p1	 0xF/* H.264 Level 2.1 */
+#define VEN_LEVEL_H264_2p2	0x10/* H.264 Level 2.2 */
+#define VEN_LEVEL_H264_3	0x11/* H.264 Level 3   */
+#define VEN_LEVEL_H264_3p1	0x12/* H.264 Level 3.1 */
+#define VEN_LEVEL_H264_3p2	0x13/* H.264 Level 3.2 */
+#define VEN_LEVEL_H264_4	0x14/* H.264 Level 4   */
+
+#define VEN_LEVEL_H263_10	0x15/* H.263 Level 10  */
+#define VEN_LEVEL_H263_20	0x16/* H.263 Level 20  */
+#define VEN_LEVEL_H263_30	0x17/* H.263 Level 30  */
+#define VEN_LEVEL_H263_40	0x18/* H.263 Level 40  */
+#define VEN_LEVEL_H263_45	0x19/* H.263 Level 45  */
+#define VEN_LEVEL_H263_50	0x1A/* H.263 Level 50  */
+#define VEN_LEVEL_H263_60	0x1B/* H.263 Level 60  */
+#define VEN_LEVEL_H263_70	0x1C/* H.263 Level 70  */
+
+/*Entropy coding model selection for H.264 encoder.*/
+#define VEN_ENTROPY_MODEL_CAVLC	1
+#define VEN_ENTROPY_MODEL_CABAC	2
+/*Cabac model number (0,1,2) for encoder.*/
+#define VEN_CABAC_MODEL_0	1/* CABAC Model 0. */
+#define VEN_CABAC_MODEL_1	2/* CABAC Model 1. */
+#define VEN_CABAC_MODEL_2	3/* CABAC Model 2. */
+
+/*Deblocking filter control type for encoder.*/
+#define VEN_DB_DISABLE	1/* 1 - Disable deblocking filter*/
+#define VEN_DB_ALL_BLKG_BNDRY	2/* 2 - All blocking boundary filtering*/
+#define VEN_DB_SKIP_SLICE_BNDRY	3/* 3 - Filtering except slice boundary*/
+
+/*Different methods of Multi slice selection.*/
+#define VEN_MSLICE_OFF	1
+#define VEN_MSLICE_CNT_MB	2 /*number of MBs per slice*/
+#define VEN_MSLICE_CNT_BYTE	3 /*number of bytes per slice*/
+#define VEN_MSLICE_GOB	4 /*Multi slice by GOB for H.263 only.*/
+
+/*Different modes for Rate Control.*/
+#define VEN_RC_OFF	1
+#define VEN_RC_VBR_VFR	2
+#define VEN_RC_VBR_CFR	3
+#define VEN_RC_CBR_VFR	4
+#define VEN_RC_CBR_CFR	5
+
+/*Different modes for flushing buffers*/
+#define VEN_FLUSH_INPUT	1
+#define VEN_FLUSH_OUTPUT	2
+#define VEN_FLUSH_ALL	3
+
+/*Different input formats for YUV data.*/
+#define VEN_INPUTFMT_NV12	1/* NV12 Linear */
+#define VEN_INPUTFMT_NV21	2/* NV21 Linear */
+#define VEN_INPUTFMT_NV12_16M2KA	3/* NV12 Linear */
+
+/*Different allowed rotation modes.*/
+#define VEN_ROTATION_0	1/* 0 degrees */
+#define VEN_ROTATION_90	2/* 90 degrees */
+#define VEN_ROTATION_180	3/* 180 degrees */
+#define VEN_ROTATION_270	4/* 270 degrees */
+
+/*IOCTL timeout values*/
+#define VEN_TIMEOUT_INFINITE	0xffffffff
+
+/*Different allowed intra refresh modes.*/
+#define VEN_IR_OFF	1
+#define VEN_IR_CYCLIC	2
+#define VEN_IR_RANDOM	3
+
+/*IOCTL BASE CODES Not to be used directly by the client.*/
+/* Base value for ioctls that are not related to encoder configuration.*/
+#define VEN_IOCTLBASE_NENC	0x800
+/* Base value for encoder configuration ioctls*/
+#define VEN_IOCTLBASE_ENC	0x850
+
+struct venc_ioctl_msg {
+	void __user *in;
+	void __user *out;
+};
+
+/*NON ENCODER CONFIGURATION IOCTLs*/
+
+/*IOCTL params:SET: InputData - unsigned long, OutputData - NULL*/
+#define VEN_IOCTL_SET_INTF_VERSION \
+	_IOW(VEN_IOCTLBASE_NENC, 0, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_timeout, OutputData - venc_msg*/
+#define VEN_IOCTL_CMD_READ_NEXT_MSG \
+	_IOWR(VEN_IOCTLBASE_NENC, 1, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - NULL, OutputData - NULL*/
+#define VEN_IOCTL_CMD_STOP_READ_MSG	_IO(VEN_IOCTLBASE_NENC, 2)
+
+/*
+ * IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_allocatorproperty
+ */
+#define VEN_IOCTL_SET_INPUT_BUFFER_REQ \
+	_IOW(VEN_IOCTLBASE_NENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INPUT_BUFFER_REQ \
+	_IOR(VEN_IOCTLBASE_NENC, 4, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 5, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 6, struct venc_ioctl_msg)
+
+/*IOCTL params: CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_FREE_INPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 7, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_allocatorproperty
+ */
+#define VEN_IOCTL_SET_OUTPUT_BUFFER_REQ \
+	_IOW(VEN_IOCTLBASE_NENC, 8, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_OUTPUT_BUFFER_REQ \
+	_IOR(VEN_IOCTLBASE_NENC, 9, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 10, struct venc_ioctl_msg)
+
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 11, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL.*/
+#define VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 12, struct venc_ioctl_msg)
+
+
+/* Asynchronous response message code: VEN_MSG_START */
+#define VEN_IOCTL_CMD_START	_IO(VEN_IOCTLBASE_NENC, 13)
+
+
+/*
+ * IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ * Asynchronous response message code: VEN_MSG_INPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_ENCODE_FRAME \
+	_IOW(VEN_IOCTLBASE_NENC, 14, struct venc_ioctl_msg)
+
+
+/*
+ *IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ *Asynchronous response message code:VEN_MSG_OUTPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 15, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:CMD: InputData - venc_bufferflush, OutputData - NULL
+ * Asynchronous response message code:VEN_MSG_INPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_FLUSH \
+	_IOW(VEN_IOCTLBASE_NENC, 16, struct venc_ioctl_msg)
+
+
+/*Asynchronous response message code: VEN_MSG_PAUSE*/
+#define VEN_IOCTL_CMD_PAUSE	_IO(VEN_IOCTLBASE_NENC, 17)
+
+/*Asynchronous response message code: VEN_MSG_RESUME*/
+#define VEN_IOCTL_CMD_RESUME _IO(VEN_IOCTLBASE_NENC, 18)
+
+/* Asynchronous response message code: VEN_MSG_STOP */
+#define VEN_IOCTL_CMD_STOP _IO(VEN_IOCTLBASE_NENC, 19)
+
+#define VEN_IOCTL_SET_RECON_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 20, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_FREE_RECON_BUFFER \
+	_IOW(VEN_IOCTLBASE_NENC, 21, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_RECON_BUFFER_SIZE \
+	_IOW(VEN_IOCTLBASE_NENC, 22, struct venc_ioctl_msg)
+
+
+
+/*ENCODER PROPERTY CONFIGURATION & CAPABILITY IOCTLs*/
+
+/*
+ * IOCTL params:SET: InputData - venc_basecfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_basecfg
+ */
+#define VEN_IOCTL_SET_BASE_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 1, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_BASE_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 2, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_LIVE_MODE \
+	_IOW(VEN_IOCTLBASE_ENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_LIVE_MODE \
+	_IOR(VEN_IOCTLBASE_ENC, 4, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_profile, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_profile
+ */
+#define VEN_IOCTL_SET_CODEC_PROFILE \
+	_IOW(VEN_IOCTLBASE_ENC, 5, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_CODEC_PROFILE \
+	_IOR(VEN_IOCTLBASE_ENC, 6, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - ven_profilelevel, OutputData - NULL
+ * GET: InputData - NULL, OutputData - ven_profilelevel
+ */
+#define VEN_IOCTL_SET_PROFILE_LEVEL \
+	_IOW(VEN_IOCTLBASE_ENC, 7, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_PROFILE_LEVEL \
+	_IOR(VEN_IOCTLBASE_ENC, 8, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_SHORT_HDR \
+	_IOW(VEN_IOCTLBASE_ENC, 9, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SHORT_HDR \
+	_IOR(VEN_IOCTLBASE_ENC, 10, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params: SET: InputData - venc_sessionqp, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_sessionqp
+ */
+#define VEN_IOCTL_SET_SESSION_QP \
+	_IOW(VEN_IOCTLBASE_ENC, 11, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SESSION_QP \
+	_IOR(VEN_IOCTLBASE_ENC, 12, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_intraperiod, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_intraperiod
+ */
+#define VEN_IOCTL_SET_INTRA_PERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 13, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_PERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 14, struct venc_ioctl_msg)
+
+
+/* Request an Iframe*/
+#define VEN_IOCTL_CMD_REQUEST_IFRAME _IO(VEN_IOCTLBASE_ENC, 15)
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_capability*/
+#define VEN_IOCTL_GET_CAPABILITY \
+	_IOR(VEN_IOCTLBASE_ENC, 16, struct venc_ioctl_msg)
+
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_seqheader*/
+#define VEN_IOCTL_GET_SEQUENCE_HDR \
+	_IOR(VEN_IOCTLBASE_ENC, 17, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_entropycfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_entropycfg
+ */
+#define VEN_IOCTL_SET_ENTROPY_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 18, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ENTROPY_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 19, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_dbcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_dbcfg
+ */
+#define VEN_IOCTL_SET_DEBLOCKING_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 20, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DEBLOCKING_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 21, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_intrarefresh, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_intrarefresh
+ */
+#define VEN_IOCTL_SET_INTRA_REFRESH \
+	_IOW(VEN_IOCTLBASE_ENC, 22, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_REFRESH \
+	_IOR(VEN_IOCTLBASE_ENC, 23, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_multiclicecfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_multiclicecfg
+ */
+#define VEN_IOCTL_SET_MULTI_SLICE_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 24, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_MULTI_SLICE_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 25, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_ratectrlcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_ratectrlcfg
+ */
+#define VEN_IOCTL_SET_RATE_CTRL_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 26, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RATE_CTRL_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 27, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_voptimingcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_voptimingcfg
+ */
+#define VEN_IOCTL_SET_VOP_TIMING_CFG \
+	_IOW(VEN_IOCTLBASE_ENC, 28, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_VOP_TIMING_CFG \
+	_IOR(VEN_IOCTLBASE_ENC, 29, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_framerate, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_framerate
+ */
+#define VEN_IOCTL_SET_FRAME_RATE \
+	_IOW(VEN_IOCTLBASE_ENC, 30, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_FRAME_RATE \
+	_IOR(VEN_IOCTLBASE_ENC, 31, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_targetbitrate, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_targetbitrate
+ */
+#define VEN_IOCTL_SET_TARGET_BITRATE \
+	_IOW(VEN_IOCTLBASE_ENC, 32, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_TARGET_BITRATE \
+	_IOR(VEN_IOCTLBASE_ENC, 33, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_rotation, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_rotation
+ */
+#define VEN_IOCTL_SET_ROTATION \
+	_IOW(VEN_IOCTLBASE_ENC, 34, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ROTATION \
+	_IOR(VEN_IOCTLBASE_ENC, 35, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_headerextension, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_headerextension
+ */
+#define VEN_IOCTL_SET_HEC \
+	_IOW(VEN_IOCTLBASE_ENC, 36, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_HEC \
+	_IOR(VEN_IOCTLBASE_ENC, 37, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_DATA_PARTITION \
+	_IOW(VEN_IOCTLBASE_ENC, 38, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DATA_PARTITION \
+	_IOR(VEN_IOCTLBASE_ENC, 39, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_RVLC \
+	_IOW(VEN_IOCTLBASE_ENC, 40, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RVLC \
+	_IOR(VEN_IOCTLBASE_ENC, 41, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_AC_PREDICTION \
+	_IOW(VEN_IOCTLBASE_ENC, 42, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_AC_PREDICTION \
+	_IOR(VEN_IOCTLBASE_ENC, 43, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_qprange, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_qprange
+ */
+#define VEN_IOCTL_SET_QP_RANGE \
+	_IOW(VEN_IOCTLBASE_ENC, 44, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_QP_RANGE \
+	_IOR(VEN_IOCTLBASE_ENC, 45, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_NUMBER_INSTANCES \
+	_IOR(VEN_IOCTLBASE_ENC, 46, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_SET_METABUFFER_MODE \
+	_IOW(VEN_IOCTLBASE_ENC, 47, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL.*/
+#define VEN_IOCTL_SET_EXTRADATA \
+	_IOW(VEN_IOCTLBASE_ENC, 48, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - unsigned int.*/
+#define VEN_IOCTL_GET_EXTRADATA \
+	_IOR(VEN_IOCTLBASE_ENC, 49, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - NULL, OutputData - NULL.*/
+#define VEN_IOCTL_SET_SLICE_DELIVERY_MODE \
+	_IO(VEN_IOCTLBASE_ENC, 50)
+
+#define VEN_IOCTL_SET_H263_PLUSPTYPE \
+	_IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_range, OutputData - NULL.*/
+#define VEN_IOCTL_SET_CAPABILITY_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 52, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_range.*/
+#define VEN_IOCTL_GET_CAPABILITY_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 53, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmode, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMODE \
+	_IOW(VEN_IOCTLBASE_ENC, 54, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmode.*/
+#define VEN_IOCTL_GET_LTRMODE \
+	_IOR(VEN_IOCTLBASE_ENC, 55, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrcount, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 56, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrcount.*/
+#define VEN_IOCTL_GET_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 57, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrperiod, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRPERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 58, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrperiod.*/
+#define VEN_IOCTL_GET_LTRPERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 59, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltruse, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRUSE \
+	_IOW(VEN_IOCTLBASE_ENC, 60, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltruse.*/
+#define VEN_IOCTL_GET_LTRUSE \
+	_IOR(VEN_IOCTLBASE_ENC, 61, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmark, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMARK \
+	_IOW(VEN_IOCTLBASE_ENC, 62, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmark.*/
+#define VEN_IOCTL_GET_LTRMARK \
+	_IOR(VEN_IOCTLBASE_ENC, 63, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL*/
+#define VEN_IOCTL_SET_SPS_PPS_FOR_IDR \
+	_IOW(VEN_IOCTLBASE_ENC, 64, struct venc_ioctl_msg)
+
+struct venc_range {
+	unsigned long	max;
+	unsigned long	min;
+	unsigned long	step_size;
+};
+
+struct venc_switch {
+	unsigned char	status;
+};
+
+struct venc_allocatorproperty {
+	unsigned long	 mincount;
+	unsigned long	 maxcount;
+	unsigned long	 actualcount;
+	unsigned long	 datasize;
+	unsigned long	 suffixsize;
+	unsigned long	 alignment;
+	unsigned long	 bufpoolid;
+};
+
+struct venc_bufferpayload {
+	unsigned char *pbuffer;
+	size_t	sz;
+	int	fd;
+	unsigned int	offset;
+	unsigned int	maped_size;
+	unsigned long	filled_len;
+};
+
+struct venc_buffer {
+	unsigned char *ptrbuffer;
+	unsigned long	sz;
+	unsigned long	len;
+	unsigned long	offset;
+	long long	timestamp;
+	unsigned long	flags;
+	void	*clientdata;
+};
+
+struct venc_basecfg {
+	unsigned long	input_width;
+	unsigned long	input_height;
+	unsigned long	dvs_width;
+	unsigned long	dvs_height;
+	unsigned long	codectype;
+	unsigned long	fps_num;
+	unsigned long	fps_den;
+	unsigned long	targetbitrate;
+	unsigned long	inputformat;
+};
+
+struct venc_profile {
+	unsigned long	profile;
+};
+struct ven_profilelevel {
+	unsigned long	level;
+};
+
+struct venc_sessionqp {
+	unsigned long	iframeqp;
+	unsigned long	pframqp;
+};
+
+struct venc_qprange {
+	unsigned long	maxqp;
+	unsigned long	minqp;
+};
+
+struct venc_plusptype {
+	unsigned long	plusptype_enable;
+};
+
+struct venc_intraperiod {
+	unsigned long	num_pframes;
+	unsigned long	num_bframes;
+};
+struct venc_seqheader {
+	unsigned char *hdrbufptr;
+	unsigned long	bufsize;
+	unsigned long	hdrlen;
+};
+
+struct venc_capability {
+	unsigned long	codec_types;
+	unsigned long	maxframe_width;
+	unsigned long	maxframe_height;
+	unsigned long	maxtarget_bitrate;
+	unsigned long	maxframe_rate;
+	unsigned long	input_formats;
+	unsigned char	dvs;
+};
+
+struct venc_entropycfg {
+	unsigned int longentropysel;
+	unsigned long	cabacmodel;
+};
+
+struct venc_dbcfg {
+	unsigned long	db_mode;
+	unsigned long	slicealpha_offset;
+	unsigned long	slicebeta_offset;
+};
+
+struct venc_intrarefresh {
+	unsigned long	irmode;
+	unsigned long	mbcount;
+};
+
+struct venc_multiclicecfg {
+	unsigned long	mslice_mode;
+	unsigned long	mslice_size;
+};
+
+struct venc_bufferflush {
+	unsigned long	flush_mode;
+};
+
+struct venc_ratectrlcfg {
+	unsigned long	rcmode;
+};
+
+struct	venc_voptimingcfg {
+	unsigned long	voptime_resolution;
+};
+struct venc_framerate {
+	unsigned long	fps_denominator;
+	unsigned long	fps_numerator;
+};
+
+struct venc_targetbitrate {
+	unsigned long	target_bitrate;
+};
+
+
+struct venc_rotation {
+	unsigned long	rotation;
+};
+
+struct venc_timeout {
+	 unsigned long	millisec;
+};
+
+struct venc_headerextension {
+	 unsigned long	header_extension;
+};
+
+struct venc_msg {
+	unsigned long	statuscode;
+	unsigned long	msgcode;
+	struct venc_buffer	buf;
+	unsigned long	msgdata_size;
+};
+
+struct venc_recon_addr {
+	unsigned char *pbuffer;
+	unsigned long buffer_size;
+	unsigned long pmem_fd;
+	unsigned long offset;
+};
+
+struct venc_recon_buff_size {
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+struct venc_ltrmode {
+	unsigned long   ltr_mode;
+};
+
+struct venc_ltrcount {
+	unsigned long   ltr_count;
+};
+
+struct venc_ltrperiod {
+	unsigned long   ltr_period;
+};
+
+struct venc_ltruse {
+	unsigned long   ltr_id;
+	unsigned long   ltr_frames;
+};
+
+#endif /* _UAPI_MSM_VIDC_ENC_H_ */
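
Every SET/GET pair above uses the same struct venc_ioctl_msg wrapper, with in/out filled according to the per-ioctl comments. A minimal sketch of programming the base configuration (resolution, frame rate and bitrate values are placeholders, and error handling is trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_vidc_enc.h>

/* SET: InputData - venc_basecfg, OutputData - NULL. */
static int venc_set_base_cfg(int fd)
{
	struct venc_basecfg cfg;
	struct venc_ioctl_msg msg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.input_width   = 1280;
	cfg.input_height  = 720;
	cfg.codectype     = VEN_CODEC_H264;
	cfg.fps_num       = 30;
	cfg.fps_den       = 1;
	cfg.targetbitrate = 4000000;
	cfg.inputformat   = VEN_INPUTFMT_NV12;

	msg.in  = &cfg;
	msg.out = NULL;
	return ioctl(fd, VEN_IOCTL_SET_BASE_CFG, &msg);
}

The matching VEN_IOCTL_GET_BASE_CFG simply reverses the roles: in = NULL, out = &cfg.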
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 9399a35..20a01ca 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2603,6 +2603,8 @@
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
+#define NL80211_WIPHY_NAME_MAXLEN		128
+
 #define NL80211_MAX_SUPP_RATES			32
 #define NL80211_MAX_SUPP_HT_RATES		77
 #define NL80211_MAX_SUPP_REG_RULES		64
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c1af9b3..f0320a0 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -203,6 +203,18 @@
  */
 #define PR_SET_TIMERSLACK_PID	127
 
+/* Per task speculation control */
+#define PR_GET_SPECULATION_CTRL		52
+#define PR_SET_SPECULATION_CTRL		53
+/* Speculation control variants */
+# define PR_SPEC_STORE_BYPASS		0
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+# define PR_SPEC_NOT_AFFECTED		0
+# define PR_SPEC_PRCTL			(1UL << 0)
+# define PR_SPEC_ENABLE			(1UL << 1)
+# define PR_SPEC_DISABLE		(1UL << 2)
+# define PR_SPEC_FORCE_DISABLE		(1UL << 3)
+
 #define PR_SET_VMA		0x53564d41
 # define PR_SET_VMA_ANON_NAME		0
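
These constants follow the upstream per-task speculation control ABI: PR_GET_SPECULATION_CTRL reports whether the speculative store bypass mitigation is controllable for the calling task, and PR_SET_SPECULATION_CTRL changes it. A hedged userspace sketch:

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Returns 0 if store-bypass speculation was disabled for the calling task. */
static int disable_ssb(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0 || !(state & PR_SPEC_PRCTL))
		return -1;	/* not affected, or per-task control unavailable */
	return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		     PR_SPEC_DISABLE, 0, 0);
}

PR_SPEC_FORCE_DISABLE behaves like PR_SPEC_DISABLE except that a later PR_SPEC_ENABLE is rejected.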
 
diff --git a/include/uapi/linux/qg-profile.h b/include/uapi/linux/qg-profile.h
index bffddbb..0230b32 100644
--- a/include/uapi/linux/qg-profile.h
+++ b/include/uapi/linux/qg-profile.h
@@ -55,6 +55,8 @@
 #define QG_MAX_FCC_MAH				16000
 #define QG_MIN_SLOPE				1
 #define QG_MAX_SLOPE				50000
+#define QG_ESR_SF_MIN				5000
+#define QG_ESR_SF_MAX				20000
 
 /*  IOCTLs to query battery profile data */
 #define BPIOCXSOC	_IOWR('B', 0x01, struct battery_params) /* SOC */
diff --git a/include/uapi/linux/qg.h b/include/uapi/linux/qg.h
index 2c7b49a..40882a7 100644
--- a/include/uapi/linux/qg.h
+++ b/include/uapi/linux/qg.h
@@ -14,12 +14,12 @@
 	QG_FIFO_TIME_DELTA,
 	QG_BATT_SOC,
 	QG_CC_SOC,
-	QG_RESERVED_3,
-	QG_RESERVED_4,
-	QG_RESERVED_5,
-	QG_RESERVED_6,
-	QG_RESERVED_7,
-	QG_RESERVED_8,
+	QG_ESR_CHARGE_DELTA,
+	QG_ESR_DISCHARGE_DELTA,
+	QG_ESR_CHARGE_SF,
+	QG_ESR_DISCHARGE_SF,
+	QG_FULL_SOC,
+	QG_CLEAR_LEARNT_DATA,
 	QG_RESERVED_9,
 	QG_RESERVED_10,
 	QG_MAX,
@@ -27,6 +27,12 @@
 
 #define QG_BATT_SOC QG_BATT_SOC
 #define QG_CC_SOC QG_CC_SOC
+#define QG_ESR_CHARGE_DELTA QG_ESR_CHARGE_DELTA
+#define QG_ESR_DISCHARGE_DELTA QG_ESR_DISCHARGE_DELTA
+#define QG_ESR_CHARGE_SF QG_ESR_CHARGE_SF
+#define QG_ESR_DISCHARGE_SF QG_ESR_DISCHARGE_SF
+#define QG_FULL_SOC QG_FULL_SOC
+#define QG_CLEAR_LEARNT_DATA QG_CLEAR_LEARNT_DATA
 
 struct fifo_data {
 	unsigned int			v;
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index f0a26b2..6de4c76 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -281,13 +281,6 @@
 	int flag;
 };
 
-struct qseecom_encdec_conf_t {
-	__le64 start_sector;
-	size_t fs_size;
-	int index;
-	int mode;
-};
-
 #define SG_ENTRY_SZ		sizeof(struct qseecom_sg_entry)
 #define SG_ENTRY_SZ_64BIT	sizeof(struct qseecom_sg_entry_64bit)
 
@@ -399,7 +392,4 @@
 #define QSEECOM_IOCTL_SET_ICE_INFO \
 	_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
 
-#define QSEECOM_IOCTL_SET_ENCDEC_INFO \
-	_IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_encdec_conf_t)
-
 #endif /* _UAPI_QSEECOM_H_ */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0f238a4..e4acb61 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -15,7 +15,9 @@
 #define SECCOMP_SET_MODE_FILTER	1
 
 /* Valid flags for SECCOMP_SET_MODE_FILTER */
-#define SECCOMP_FILTER_FLAG_TSYNC	1
+#define SECCOMP_FILTER_FLAG_TSYNC	(1UL << 0)
+/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW	(1UL << 2)
 
 /*
  * All BPF programs must return a 32-bit value.
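
SECCOMP_FILTER_FLAG_SPEC_ALLOW is passed in the flags argument of the seccomp(2) syscall when installing a filter, opting the task out of the implicit speculative store bypass disable. A hedged sketch (the allow-all BPF program is illustrative only, and prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) is assumed to have been called first):

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Install an allow-everything filter without forcing the SSB mitigation. */
static int install_filter_spec_allow(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	/* No libc wrapper is assumed; invoke seccomp(2) directly. */
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog);
}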
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index 6d25967..63abd156 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -50,6 +50,8 @@
 
 #define MSM_SENSOR_BYPASS_VIDEO_NODE    1
 
+#define SENSOR_PROBE_WRITE
+
 enum msm_sensor_camera_id_t {
 	CAMERA_0,
 	CAMERA_1,
@@ -292,10 +294,19 @@
 	unsigned int            sensor_mount_angle;
 };
 
+struct msm_camera_i2c_reg_setting {
+	struct msm_camera_i2c_reg_array *reg_setting;
+	unsigned short size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	unsigned short delay;
+};
+
 struct msm_sensor_id_info_t {
 	unsigned short sensor_id_reg_addr;
 	unsigned short sensor_id;
 	unsigned short sensor_id_mask;
+	struct msm_camera_i2c_reg_setting setting;
 };
 
 struct msm_camera_sensor_slave_info {
@@ -322,14 +333,6 @@
 	unsigned int delay;
 };
 
-struct msm_camera_i2c_reg_setting {
-	struct msm_camera_i2c_reg_array *reg_setting;
-	unsigned short size;
-	enum msm_camera_i2c_reg_addr_type addr_type;
-	enum msm_camera_i2c_data_type data_type;
-	unsigned short delay;
-};
-
 struct msm_camera_csid_vc_cfg {
 	unsigned char cid;
 	unsigned char dt;
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 053fa76..74a8d93 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -18,6 +18,7 @@
 #define ISP1_BIT              (0x10000 << 2)
 #define ISP_META_CHANNEL_BIT  (0x10000 << 3)
 #define ISP_SCRATCH_BUF_BIT   (0x10000 << 4)
+#define ISP_PDAF_CHANNEL_BIT  (0x10000 << 5)
 #define ISP_OFFLINE_STATS_BIT (0x10000 << 5)
 #define ISP_SVHDR_IN_BIT      (0x10000 << 6) /* RDI hw stream for SVHDR */
 #define ISP_SVHDR_OUT_BIT     (0x10000 << 7) /* SVHDR output bufq stream*/
@@ -295,6 +296,11 @@
 	uint8_t rdi_cid;/*CID 1-16*/
 };
 
+enum msm_stream_memory_input_t {
+	MEMORY_INPUT_DISABLED,
+	MEMORY_INPUT_ENABLED
+};
+
 enum msm_stream_rdi_input_type {
 	MSM_CAMERA_RDI_MIN,
 	MSM_CAMERA_RDI_PDAF,
@@ -324,6 +330,29 @@
 	enum msm_stream_rdi_input_type rdi_input_type;
 };
 
+struct msm_vfe32_axi_stream_request_cmd {
+	uint32_t session_id;
+	uint32_t stream_id;
+	uint32_t vt_enable;
+	uint32_t output_format;/*Planar/RAW/Misc*/
+	enum msm_vfe_axi_stream_src stream_src; /*CAMIF/IDEAL/RDIs*/
+	struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+	uint32_t burst_count;
+	uint32_t hfr_mode;
+	uint8_t frame_base;
+
+	uint32_t init_frame_drop; /*MAX 31 Frames*/
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+	uint8_t buf_divert; /* if TRUE no vb2 buf done. */
+	/*Return values*/
+	uint32_t axi_stream_handle;
+	uint32_t controllable_output;
+	uint32_t burst_len;
+	/* Flag indicating memory input stream */
+	enum msm_stream_memory_input_t memory_input;
+};
+
 struct msm_vfe_axi_stream_release_cmd {
 	uint32_t stream_handle;
 };
@@ -680,7 +709,9 @@
 	ISP_PING_PONG_MISMATCH = 12,
 	ISP_REG_UPDATE_MISSING = 13,
 	ISP_BUF_FATAL_ERROR = 14,
-	ISP_EVENT_MAX         = 15
+	ISP_EVENT_MAX         = 15,
+	ISP_WM_BUS_OVERFLOW = 16,
+	ISP_CAMIF_ERROR     = 17,
 };
 
 #define ISP_EVENT_OFFSET          8
@@ -710,6 +741,7 @@
 #define ISP_EVENT_REG_UPDATE_MISSING (ISP_EVENT_BASE + ISP_REG_UPDATE_MISSING)
 #define ISP_EVENT_BUF_FATAL_ERROR (ISP_EVENT_BASE + ISP_BUF_FATAL_ERROR)
 #define ISP_EVENT_STREAM_UPDATE_DONE   (ISP_STREAM_EVENT_BASE)
+#define ISP_EVENT_WM_BUS_OVERFLOW (ISP_EVENT_BASE + ISP_WM_BUS_OVERFLOW)
 
 /* The msm_v4l2_event_data structure should match the
  * v4l2_event.u.data field.
@@ -759,6 +791,11 @@
 	uint32_t stream_id_mask;
 };
 
+struct msm_isp32_error_info {
+	/* 1 << msm_isp_event_idx */
+	uint32_t error_mask;
+};
+
 /* This structure reports delta between master and slave */
 struct msm_isp_ms_delta_info {
 	uint8_t num_delta_info;
@@ -827,6 +864,25 @@
 	} u; /* union can have max 52 bytes */
 };
 
+struct msm_isp32_event_data {
+	/*Wall clock except for buffer divert events
+	 *which use monotonic clock
+	 */
+	struct timeval timestamp;
+	/* Monotonic timestamp since bootup */
+	struct timeval mono_timestamp;
+	enum msm_vfe_input_src input_intf;
+	uint32_t frame_id;
+	union {
+		/* Sent for Stats_Done event */
+		struct msm_isp_stats_event stats;
+		/* Sent for Buf_Divert event */
+		struct msm_isp_buf_event buf_done;
+		struct msm_isp32_error_info error_info;
+	} u; /* union can have max 52 bytes */
+	uint32_t is_skip_pproc;
+};
+
 enum msm_vfe_ahb_clk_vote {
 	MSM_ISP_CAMERA_AHB_SVS_VOTE = 1,
 	MSM_ISP_CAMERA_AHB_TURBO_VOTE = 2,
@@ -919,6 +975,7 @@
 	MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
 	MSM_ISP_REQUEST_BUF_VER2,
 	MSM_ISP_DUAL_HW_LPM_MODE,
+	MSM_ISP32_REQUEST_STREAM,
 };
 
 #define VIDIOC_MSM_VFE_REG_CFG \
@@ -941,6 +998,10 @@
 	_IOWR('V', MSM_ISP_REQUEST_STREAM, \
 		struct msm_vfe_axi_stream_request_cmd)
 
+#define VIDIOC_MSM_ISP32_REQUEST_STREAM \
+	_IOWR('V', MSM_ISP32_REQUEST_STREAM, \
+		struct msm_vfe32_axi_stream_request_cmd)
+
 #define VIDIOC_MSM_ISP_CFG_STREAM \
 	_IOWR('V', MSM_ISP_CFG_STREAM, \
 		struct msm_vfe_axi_stream_cfg_cmd)
@@ -1038,6 +1099,8 @@
 
 #define VIDIOC_MSM_ISP_REQUEST_BUF_VER2 \
 	_IOWR('V', MSM_ISP_REQUEST_BUF_VER2, struct msm_isp_buf_request_ver2)
+#define VIDIOC_MSM_ISP_BUF_DONE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+21, struct msm_isp32_event_data)
 
 #define VIDIOC_MSM_ISP_DUAL_HW_LPM_MODE \
 	_IOWR('V', MSM_ISP_DUAL_HW_LPM_MODE, \
diff --git a/ipc/shm.c b/ipc/shm.c
index b626745..9c687cd 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1127,14 +1127,17 @@
 		goto out;
 	else if ((addr = (ulong)shmaddr)) {
 		if (addr & (shmlba - 1)) {
-			/*
-			 * Round down to the nearest multiple of shmlba.
-			 * For sane do_mmap_pgoff() parameters, avoid
-			 * round downs that trigger nil-page and MAP_FIXED.
-			 */
-			if ((shmflg & SHM_RND) && addr >= shmlba)
-				addr &= ~(shmlba - 1);
-			else
+			if (shmflg & SHM_RND) {
+				addr &= ~(shmlba - 1);  /* round down */
+
+				/*
+				 * Ensure that the round-down is non-nil
+				 * when remapping. This can happen for
+				 * cases when addr < shmlba.
+				 */
+				if (!addr && (shmflg & SHM_REMAP))
+					goto out;
+			} else
 #ifndef __ARCH_FORCE_SHMLBA
 				if (addr & ~PAGE_MASK)
 #endif
diff --git a/kernel/audit.c b/kernel/audit.c
index da4e7c0..3461a3d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -742,6 +742,8 @@
 		return;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
+	if (!ab)
+		return;
 	audit_log_task_info(ab, current);
 	audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
 			 audit_feature_names[which], !!old_feature, !!new_feature,
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 2cd5256..93648f6 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2252,10 +2252,11 @@
 				audit_sig_uid = uid;
 			security_task_getsecid(tsk, &audit_sig_sid);
 		}
-		if (!audit_signals || audit_dummy_context())
-			return 0;
 	}
 
+	if (!audit_signals || audit_dummy_context())
+		return 0;
+
 	/* optimize the common case by putting first signal recipient directly
 	 * in audit_context */
 	if (!ctx->target_pid) {
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 4db6a67..b30ca0f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -194,7 +194,7 @@
 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	u32 index = *(u32 *)key;
+	u32 index = key ? *(u32 *)key : U32_MAX;
 	u32 *next = (u32 *)next_key;
 
 	if (index >= array->map.max_entries) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 27f4f2c..9c86d5d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -328,12 +328,15 @@
 	struct hlist_head *head;
 	struct htab_elem *l, *next_l;
 	u32 hash, key_size;
-	int i;
+	int i = 0;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	key_size = map->key_size;
 
+	if (!key)
+		goto find_first_elem;
+
 	hash = htab_map_hash(key, key_size);
 
 	head = select_bucket(htab, hash);
@@ -341,10 +344,8 @@
 	/* lookup the key */
 	l = lookup_elem_raw(head, hash, key, key_size);
 
-	if (!l) {
-		i = 0;
+	if (!l)
 		goto find_first_elem;
-	}
 
 	/* key was found, get next key in the same bucket */
 	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 41aa664..85ea598 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -579,14 +579,18 @@
 		goto err_put;
 	}
 
-	err = -ENOMEM;
-	key = kmalloc(map->key_size, GFP_USER);
-	if (!key)
-		goto err_put;
+	if (ukey) {
+		err = -ENOMEM;
+		key = kmalloc(map->key_size, GFP_USER);
+		if (!key)
+			goto err_put;
 
-	err = -EFAULT;
-	if (copy_from_user(key, ukey, map->key_size) != 0)
-		goto free_key;
+		err = -EFAULT;
+		if (copy_from_user(key, ukey, map->key_size) != 0)
+			goto free_key;
+	} else {
+		key = NULL;
+	}
 
 	err = -ENOMEM;
 	next_key = kmalloc(map->key_size, GFP_USER);
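
Together with the matching array and hash map changes above, this lets BPF_MAP_GET_NEXT_KEY start iteration when userspace passes a NULL key pointer. A hedged sketch of the raw syscall usage (no libbpf helpers assumed):

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fetch the first key of a map by passing a NULL key pointer. */
static int bpf_map_first_key(int map_fd, void *first_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd   = map_fd;
	attr.key      = 0;	/* NULL key: begin from the first element */
	attr.next_key = (uint64_t)(unsigned long)first_key;

	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

Subsequent calls point attr.key at the previously returned key to walk the rest of the map.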
diff --git a/kernel/cfi.c b/kernel/cfi.c
index 87053e2..6951c25 100644
--- a/kernel/cfi.c
+++ b/kernel/cfi.c
@@ -23,12 +23,12 @@
 #define cfi_slowpath_handler	__cfi_slowpath
 #endif /* CONFIG_CFI_PERMISSIVE */
 
-static inline void handle_cfi_failure()
+static inline void handle_cfi_failure(void *ptr)
 {
 #ifdef CONFIG_CFI_PERMISSIVE
-	WARN_RATELIMIT(1, "CFI failure:\n");
+	WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
 #else
-	pr_err("CFI failure:\n");
+	pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
 	BUG();
 #endif
 }
@@ -282,18 +282,18 @@
 	if (likely(check))
 		check(id, ptr, diag);
 	else /* Don't allow unchecked modules */
-		handle_cfi_failure();
+		handle_cfi_failure(ptr);
 }
 EXPORT_SYMBOL(cfi_slowpath_handler);
 #endif /* CONFIG_MODULES */
 
-void cfi_failure_handler(void *data, void *value, void *vtable)
+void cfi_failure_handler(void *data, void *ptr, void *vtable)
 {
-	handle_cfi_failure();
+	handle_cfi_failure(ptr);
 }
 EXPORT_SYMBOL(cfi_failure_handler);
 
-void __cfi_check_fail(void *data, void *value)
+void __cfi_check_fail(void *data, void *ptr)
 {
-	handle_cfi_failure();
+	handle_cfi_failure(ptr);
 }
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 2a20c0d..5a58421 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1564,6 +1564,7 @@
 	int symbolic = 0;
 	int valid = 0;
 	int phys = 0;
+	int raw = 0;
 
 	kdbgetintenv("MDCOUNT", &mdcount);
 	kdbgetintenv("RADIX", &radix);
@@ -1573,9 +1574,10 @@
 	repeat = mdcount * 16 / bytesperword;
 
 	if (strcmp(argv[0], "mdr") == 0) {
-		if (argc != 2)
+		if (argc == 2 || (argc == 0 && last_addr != 0))
+			valid = raw = 1;
+		else
 			return KDB_ARGCOUNT;
-		valid = 1;
 	} else if (isdigit(argv[0][2])) {
 		bytesperword = (int)(argv[0][2] - '0');
 		if (bytesperword == 0) {
@@ -1611,7 +1613,10 @@
 		radix = last_radix;
 		bytesperword = last_bytesperword;
 		repeat = last_repeat;
-		mdcount = ((repeat * bytesperword) + 15) / 16;
+		if (raw)
+			mdcount = repeat;
+		else
+			mdcount = ((repeat * bytesperword) + 15) / 16;
 	}
 
 	if (argc) {
@@ -1628,7 +1633,10 @@
 			diag = kdbgetularg(argv[nextarg], &val);
 			if (!diag) {
 				mdcount = (int) val;
-				repeat = mdcount * 16 / bytesperword;
+				if (raw)
+					repeat = mdcount;
+				else
+					repeat = mdcount * 16 / bytesperword;
 			}
 		}
 		if (argc >= nextarg+1) {
@@ -1638,8 +1646,15 @@
 		}
 	}
 
-	if (strcmp(argv[0], "mdr") == 0)
-		return kdb_mdr(addr, mdcount);
+	if (strcmp(argv[0], "mdr") == 0) {
+		int ret;
+		last_addr = addr;
+		ret = kdb_mdr(addr, mdcount);
+		last_addr += mdcount;
+		last_repeat = mdcount;
+		last_bytesperword = bytesperword; // to make REPEAT happy
+		return ret;
+	}
 
 	switch (radix) {
 	case 10:
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 411226b..c265f1c 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -117,23 +117,20 @@
 		goto exit;
 	}
 
-	if (count > 1) {
-		/* If the allocation failed, give up */
-		if (!callchain_cpus_entries)
-			err = -ENOMEM;
-		/*
-		 * If requesting per event more than the global cap,
-		 * return a different error to help userspace figure
-		 * this out.
-		 *
-		 * And also do it here so that we have &callchain_mutex held.
-		 */
-		if (event_max_stack > sysctl_perf_event_max_stack)
-			err = -EOVERFLOW;
+	/*
+	 * If requesting per event more than the global cap,
+	 * return a different error to help userspace figure
+	 * this out.
+	 *
+	 * And also do it here so that we have &callchain_mutex held.
+	 */
+	if (event_max_stack > sysctl_perf_event_max_stack) {
+		err = -EOVERFLOW;
 		goto exit;
 	}
 
-	err = alloc_callchain_buffers();
+	if (count == 1)
+		err = alloc_callchain_buffers();
 exit:
 	if (err)
 		atomic_dec(&nr_callchain_events);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2ea4eb1..ee74fff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -486,7 +486,7 @@
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos)
 {
-	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
 	if (ret || !write)
 		return ret;
@@ -667,9 +667,15 @@
 
 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
-	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
-	if (cgrp_out)
-		__update_cgrp_time(cgrp_out);
+	struct perf_cgroup *cgrp = cpuctx->cgrp;
+	struct cgroup_subsys_state *css;
+
+	if (cgrp) {
+		for (css = &cgrp->css; css; css = css->parent) {
+			cgrp = container_of(css, struct perf_cgroup, css);
+			__update_cgrp_time(cgrp);
+		}
+	}
 }
 
 static inline void update_cgrp_time_from_event(struct perf_event *event)
@@ -697,6 +703,7 @@
 {
 	struct perf_cgroup *cgrp;
 	struct perf_cgroup_info *info;
+	struct cgroup_subsys_state *css;
 
 	/*
 	 * ctx->lock held by caller
@@ -707,8 +714,12 @@
 		return;
 
 	cgrp = perf_cgroup_from_task(task, ctx);
-	info = this_cpu_ptr(cgrp->info);
-	info->timestamp = ctx->timestamp;
+
+	for (css = &cgrp->css; css; css = css->parent) {
+		cgrp = container_of(css, struct perf_cgroup, css);
+		info = this_cpu_ptr(cgrp->info);
+		info->timestamp = ctx->timestamp;
+	}
 }
 
 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
@@ -5819,7 +5830,8 @@
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		values[n++] = running;
 
-	if (leader != event)
+	if ((leader != event) &&
+	    (leader->state == PERF_EVENT_STATE_ACTIVE))
 		leader->pmu->read(leader);
 
 	values[n++] = perf_event_count(leader);
@@ -9708,9 +9720,9 @@
 		 * __u16 sample size limit.
 		 */
 		if (attr->sample_stack_user >= USHRT_MAX)
-			ret = -EINVAL;
+			return -EINVAL;
 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
-			ret = -EINVAL;
+			return -EINVAL;
 	}
 
 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 257fa46..017f793 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
 #include <linux/poll.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -844,8 +845,10 @@
 			return NULL;
 
 		/* AUX space */
-		if (pgoff >= rb->aux_pgoff)
-			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+		if (pgoff >= rb->aux_pgoff) {
+			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
+			return virt_to_page(rb->aux_pages[aux_pgoff]);
+		}
 	}
 
 	return __perf_mmap_to_page(rb, pgoff);
diff --git a/kernel/exit.c b/kernel/exit.c
index 4b4f03a..ee8c601 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -717,6 +717,7 @@
 	static DEFINE_SPINLOCK(low_water_lock);
 	static int lowest_to_date = THREAD_SIZE;
 	unsigned long free;
+	int islower = false;
 
 	free = stack_not_used(current);
 
@@ -725,11 +726,15 @@
 
 	spin_lock(&low_water_lock);
 	if (free < lowest_to_date) {
-		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
-			current->comm, task_pid_nr(current), free);
 		lowest_to_date = free;
+		islower = true;
 	}
 	spin_unlock(&low_water_lock);
+
+	if (islower) {
+		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
+				current->comm, task_pid_nr(current), free);
+	}
 }
 #else
 static inline void check_stack_usage(void) {}
@@ -1680,6 +1685,10 @@
 			__WNOTHREAD|__WCLONE|__WALL))
 		return -EINVAL;
 
+	/* -INT_MIN is not defined */
+	if (upid == INT_MIN)
+		return -ESRCH;
+
 	if (upid == -1)
 		type = PIDTYPE_MAX;
 	else if (upid < 0) {
diff --git a/kernel/futex.c b/kernel/futex.c
index bb2265a..c3ea6f2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1458,6 +1458,45 @@
 	return ret;
 }
 
+static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+{
+	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
+	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
+	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
+	int oldval, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
+		if (oparg < 0 || oparg > 31)
+			return -EINVAL;
+		oparg = 1 << oparg;
+	}
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+	if (ret)
+		return ret;
+
+	switch (cmp) {
+	case FUTEX_OP_CMP_EQ:
+		return oldval == cmparg;
+	case FUTEX_OP_CMP_NE:
+		return oldval != cmparg;
+	case FUTEX_OP_CMP_LT:
+		return oldval < cmparg;
+	case FUTEX_OP_CMP_GE:
+		return oldval >= cmparg;
+	case FUTEX_OP_CMP_LE:
+		return oldval <= cmparg;
+	case FUTEX_OP_CMP_GT:
+		return oldval > cmparg;
+	default:
+		return -ENOSYS;
+	}
+}
+
 /*
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
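
The field extraction above mirrors the userspace FUTEX_OP() packing in include/uapi/linux/futex.h, so the operand of a FUTEX_WAKE_OP call is built as in this sketch (wake semantics as described in the futex(2) man page):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * futex(uaddr, FUTEX_WAKE_OP, val, val2, uaddr2, val3): the kernel performs
 * *uaddr2 += 1, wakes up to val waiters on uaddr, and wakes up to val2
 * waiters on uaddr2 if the old *uaddr2 was greater than 0.
 */
static int wake_op(int *uaddr, int *uaddr2)
{
	unsigned int encoded = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	return syscall(SYS_futex, uaddr, FUTEX_WAKE_OP, 1, 1, uaddr2, encoded);
}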
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5616755..f5ab72e 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -38,6 +38,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
 #include <linux/hugetlb.h>
+#include <linux/frame.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -878,7 +879,7 @@
  * only when panic_cpu holds the current CPU number; this is the only CPU
  * which processes crash_kexec routines.
  */
-void __crash_kexec(struct pt_regs *regs)
+void __noclone __crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
@@ -900,6 +901,7 @@
 		mutex_unlock(&kexec_mutex);
 	}
 }
+STACK_FRAME_NON_STANDARD(__crash_kexec);
 
 void crash_kexec(struct pt_regs *regs)
 {
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 19248dd..c7471c3 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,51 +20,14 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/spinlock.h>
 #include <asm/qrwlock.h>
 
-/*
- * This internal data structure is used for optimizing access to some of
- * the subfields within the atomic_t cnts.
- */
-struct __qrwlock {
-	union {
-		atomic_t cnts;
-		struct {
-#ifdef __LITTLE_ENDIAN
-			u8 wmode;	/* Writer mode   */
-			u8 rcnts[3];	/* Reader counts */
-#else
-			u8 rcnts[3];	/* Reader counts */
-			u8 wmode;	/* Writer mode   */
-#endif
-		};
-	};
-	arch_spinlock_t	lock;
-};
-
-/**
- * rspin_until_writer_unlock - inc reader count & spin until writer is gone
- * @lock  : Pointer to queue rwlock structure
- * @writer: Current queue rwlock writer status byte
- *
- * In interrupt context or at the head of the queue, the reader will just
- * increment the reader count & wait until the writer releases the lock.
- */
-static __always_inline void
-rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
-{
-	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		cpu_relax_lowlatency();
-		cnts = atomic_read_acquire(&lock->cnts);
-	}
-}
-
 /**
  * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
- * @cnts: Current qrwlock lock value
  */
-void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
+void queued_read_lock_slowpath(struct qrwlock *lock)
 {
 	/*
 	 * Readers come here when they cannot get the lock without waiting
@@ -72,13 +35,11 @@
 	if (unlikely(in_interrupt())) {
 		/*
 		 * Readers in interrupt context will get the lock immediately
-		 * if the writer is just waiting (not holding the lock yet).
-		 * The rspin_until_writer_unlock() function returns immediately
-		 * in this case. Otherwise, they will spin (with ACQUIRE
-		 * semantics) until the lock is available without waiting in
-		 * the queue.
+		 * if the writer is just waiting (not holding the lock yet),
+		 * so spin with ACQUIRE semantics until the lock is available
+		 * without waiting in the queue.
 		 */
-		rspin_until_writer_unlock(lock, cnts);
+		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 		return;
 	}
 	atomic_sub(_QR_BIAS, &lock->cnts);
@@ -87,14 +48,14 @@
 	 * Put the reader into the wait queue
 	 */
 	arch_spin_lock(&lock->wait_lock);
+	atomic_add(_QR_BIAS, &lock->cnts);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
 	 * that accesses can't leak upwards out of our subsequent critical
 	 * section in the case that the lock is currently held for write.
 	 */
-	cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
-	rspin_until_writer_unlock(lock, cnts);
+	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 
 	/*
 	 * Signal the next one in queue to become queue head
@@ -109,8 +70,6 @@
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
-	u32 cnts;
-
 	/* Put the writer into the wait queue */
 	arch_spin_lock(&lock->wait_lock);
 
@@ -119,30 +78,14 @@
 	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
-	/*
-	 * Set the waiting flag to notify readers that a writer is pending,
-	 * or wait for a previous writer to go away.
-	 */
-	for (;;) {
-		struct __qrwlock *l = (struct __qrwlock *)lock;
+	/* Set the waiting flag to notify readers that a writer is pending */
+	atomic_add(_QW_WAITING, &lock->cnts);
 
-		if (!READ_ONCE(l->wmode) &&
-		   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
-			break;
-
-		cpu_relax_lowlatency();
-	}
-
-	/* When no more readers, set the locked flag */
-	for (;;) {
-		cnts = atomic_read(&lock->cnts);
-		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
-					    _QW_LOCKED) == _QW_WAITING))
-			break;
-
-		cpu_relax_lowlatency();
-	}
+	/* When no more readers or writers, set the locked flag */
+	do {
+		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+					_QW_LOCKED) != _QW_WAITING);
 unlock:
 	arch_spin_unlock(&lock->wait_lock);
 }
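
For context on the qrwlock rework above: the open-coded rspin_until_writer_unlock() helper is replaced by atomic_cond_read_acquire(), i.e. "spin until a condition on the atomic value holds, with the satisfying read carrying acquire ordering". Below is a minimal user-space C11 sketch of that wait pattern only; QW_LOCKED and the names are illustrative assumptions, not the kernel definitions, and the cpu_relax()-style pause is omitted.

#include <stdatomic.h>
#include <stdio.h>

#define QW_LOCKED 0xffu                         /* assumed writer-held marker */

/* Spin until no writer holds the lock; the satisfying read is an acquire. */
static unsigned int cond_read_acquire_unlocked(atomic_uint *cnts)
{
	unsigned int val;

	do {
		val = atomic_load_explicit(cnts, memory_order_acquire);
	} while ((val & QW_LOCKED) == QW_LOCKED);

	return val;
}

int main(void)
{
	atomic_uint cnts;

	atomic_init(&cnts, 0);                  /* no writer present */
	printf("observed cnts = %u\n", cond_read_acquire_unlocked(&cnts));
	return 0;
}

The useful property is that the read which finally observes the condition is the one that orders the subsequent critical section.
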
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index b2caec7..a72f5df 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -495,6 +495,14 @@
 	tail = encode_tail(smp_processor_id(), idx);
 
 	node += idx;
+
+	/*
+	 * Ensure that we increment the head node->count before initialising
+	 * the actual node. If the compiler is kind enough to reorder these
+	 * stores, then an IRQ could overwrite our assignments.
+	 */
+	barrier();
+
 	node->locked = 0;
 	node->next = NULL;
 	pv_init_node(node);
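
The qspinlock hunk adds only a compiler barrier: the per-CPU node slot must be visibly claimed before its fields are re-initialised, otherwise an interrupt that nests and takes the next slot could have its stores overwritten. A sketch of the idea follows, assuming the GCC/Clang inline-assembly form that the kernel's barrier() macro boils down to; the node layout here is made up.

#include <stddef.h>

/* Assumed stand-in for the kernel's barrier(): a compiler-only fence. */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

struct qnode {
	int count;
	int locked;
	struct qnode *next;
};

static struct qnode node_pool[4];

static void claim_first_node(void)
{
	struct qnode *node = &node_pool[0];

	node->count++;          /* publish the slot as taken first ... */
	compiler_barrier();     /* ... and stop the compiler from hoisting the
	                         * initialising stores above that bump */
	node->locked = 0;
	node->next = NULL;
}

int main(void)
{
	claim_first_node();
	return node_pool[0].count == 1 ? 0 : 1;
}
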
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 56d1d0d..ccba4d8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -103,9 +103,6 @@
 extern dev_t swsusp_resume_device;
 extern sector_t swsusp_resume_block;
 
-extern asmlinkage int swsusp_arch_suspend(void);
-extern asmlinkage int swsusp_arch_resume(void);
-
 extern int create_basic_memory_bitmaps(void);
 extern void free_basic_memory_bitmaps(void);
 extern int hibernate_preallocate_memory(void);
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 0469d80..d6a0b0f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -478,8 +478,6 @@
 	val = c->default_value;
 
 	for_each_cpu(cpu, mask) {
-		if (cpu_isolated(cpu))
-			continue;
 
 		switch (c->type) {
 		case PM_QOS_MIN:
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e3944c4..554ea54 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -521,8 +521,14 @@
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		/*
+		 * We could be printing a lot while holding a spinlock.
+		 * Avoid triggering hard lockup.
+		 */
+		touch_nmi_watchdog();
 		sched_show_task(t);
+	}
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
@@ -1629,6 +1635,12 @@
 	char *ticks_title;
 	unsigned long ticks_value;
 
+	/*
+	 * We could be printing a lot while holding a spinlock.  Avoid
+	 * triggering hard lockup.
+	 */
+	touch_nmi_watchdog();
+
 	if (rsp->gpnum == rdp->gpnum) {
 		ticks_title = "ticks this GP";
 		ticks_value = rdp->ticks_this_gp;
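
Both RCU dump paths above can emit a large number of printk lines while holding a raw spinlock, so they now reset the NMI watchdog as they go. A trivial user-space sketch of that pattern; pet_watchdog() is an assumed stand-in for touch_nmi_watchdog().

#include <stdio.h>

/* Assumed stand-in for touch_nmi_watchdog(): tell the watchdog we're alive. */
static void pet_watchdog(void)
{
}

static void dump_many_tasks(int ntasks)
{
	for (int i = 0; i < ntasks; i++) {
		/*
		 * Printing thousands of lines can exceed the hard-lockup
		 * threshold, so reset the watchdog on every iteration.
		 */
		pet_watchdog();
		printf("task %d ...\n", i);
	}
}

int main(void)
{
	dump_many_tasks(3);
	return 0;
}
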
diff --git a/kernel/relay.c b/kernel/relay.c
index 2603e04..91e8fbf 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -163,7 +163,7 @@
 {
 	struct rchan_buf *buf;
 
-	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
 		return NULL;
 
 	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
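
The relay fix tightens the n_subbufs sanity check from UINT_MAX to the allocator's own limit, so the later n_subbufs * sizeof(size_t *) request can actually be satisfied. A hedged user-space analogue of that guard; ALLOC_MAX is an assumed cap playing the role of KMALLOC_MAX_SIZE.

#include <stdlib.h>

#define ALLOC_MAX (1u << 22)    /* assumed allocator limit */

/* Refuse oversized requests up front instead of overflowing the product. */
static void *alloc_padding_array(size_t n_subbufs)
{
	if (n_subbufs > ALLOC_MAX / sizeof(size_t *))
		return NULL;

	return calloc(n_subbufs, sizeof(size_t *));
}

int main(void)
{
	void *ok = alloc_padding_array(16);
	void *too_big = alloc_padding_array((size_t)-1);

	free(ok);
	return too_big == NULL ? 0 : 1;
}
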
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fb2e56c..b755eb8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2172,7 +2172,7 @@
 	rq = cpu_rq(task_cpu(p));
 	raw_spin_lock(&rq->lock);
 	old_load = task_load(p);
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 	raw_spin_unlock(&rq->lock);
@@ -2259,7 +2259,7 @@
 	trace_sched_waking(p);
 
 	if (!task_on_rq_queued(p)) {
-		u64 wallclock = ktime_get_ns();
+		u64 wallclock = sched_ktime_clock();
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
@@ -3261,7 +3261,7 @@
 	old_load = task_load(curr);
 	set_window_start(rq);
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 
 	update_rq_clock(rq);
@@ -3633,7 +3633,7 @@
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	if (likely(prev != next)) {
 		if (!prev->on_rq)
 			prev->last_sleep_ts = wallclock;
@@ -9611,7 +9611,7 @@
 	rq = task_rq_lock(p, &rf);
 
 	/* rq->curr == p */
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	dequeue_task(rq, p, 0);
 	/*
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 26c9cf4..bd64b1a 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -789,6 +789,7 @@
 	unsigned long flags;
 	unsigned int num_cpus = cluster->num_cpus;
 	unsigned int nr_isolated = 0;
+	bool first_pass = cluster->nr_not_preferred_cpus;
 
 	/*
 	 * Protect against entry being removed (and added at tail) by other
@@ -834,6 +835,7 @@
 	cluster->nr_isolated_cpus += nr_isolated;
 	spin_unlock_irqrestore(&state_lock, flags);
 
+again:
 	/*
 	 * If the number of active CPUs is within the limits, then
 	 * don't force isolation of any busy CPUs.
@@ -853,6 +855,9 @@
 		if (cluster->active_cpus <= cluster->max_cpus)
 			break;
 
+		if (first_pass && !c->not_preferred)
+			continue;
+
 		spin_unlock_irqrestore(&state_lock, flags);
 
 		pr_debug("Trying to isolate CPU%u\n", c->cpu);
@@ -869,6 +874,10 @@
 	cluster->nr_isolated_cpus += nr_isolated;
 	spin_unlock_irqrestore(&state_lock, flags);
 
+	if (first_pass && cluster->active_cpus > cluster->max_cpus) {
+		first_pass = false;
+		goto again;
+	}
 }
 
 static void __try_to_unisolate(struct cluster_data *cluster,
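
The core_ctl change above turns forced isolation into two passes: the first pass only considers CPUs flagged not_preferred, and only if the cluster is still above max_cpus does a second pass consider the rest (the real code seeds first_pass from whether any not-preferred CPUs exist at all). A compact sketch of that selection order, with made-up types and names:

#include <stdbool.h>
#include <stddef.h>

struct cpu_slot {
	int id;
	bool not_preferred;
	bool isolated;
};

/* Isolate CPUs until active <= max, sacrificing not_preferred CPUs first. */
static void isolate_two_pass(struct cpu_slot *cpus, size_t n,
			     unsigned int *active, unsigned int max)
{
	bool first_pass = true;

again:
	for (size_t i = 0; i < n && *active > max; i++) {
		if (cpus[i].isolated)
			continue;
		if (first_pass && !cpus[i].not_preferred)
			continue;       /* first pass: only not_preferred CPUs */
		cpus[i].isolated = true;
		(*active)--;
	}

	if (first_pass && *active > max) {
		first_pass = false;     /* still over the limit: consider everyone */
		goto again;
	}
}

int main(void)
{
	struct cpu_slot cpus[4] = {
		{ 0, false, false }, { 1, true, false },
		{ 2, true, false },  { 3, false, false },
	};
	unsigned int active = 4;

	isolate_two_pass(cpus, 4, &active, 2);
	return active == 2 ? 0 : 1;
}
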
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 895c63f..b71f116 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -466,7 +466,7 @@
 	mutex_lock(&sg_policy->work_lock);
 	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
-			   ktime_get_ns());
+			   sched_ktime_clock());
 	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
 				CPUFREQ_RELATION_L);
@@ -919,7 +919,7 @@
 		mutex_lock(&sg_policy->work_lock);
 		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
-				   ktime_get_ns());
+				   sched_ktime_clock());
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 		cpufreq_policy_apply_limits(policy);
 		mutex_unlock(&sg_policy->work_lock);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 87bea1e..aec86a26 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -133,6 +133,8 @@
 
 		if (lowest_mask) {
 			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
+			cpumask_andnot(lowest_mask, lowest_mask,
+				       cpu_isolated_mask);
 			if (drop_nopreempts)
 				drop_nopreempt_cpus(lowest_mask);
 			/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78c433a..6b1be1d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5710,6 +5710,9 @@
 	for_each_cpu(i, sched_group_cpus(sg))
 		state = min(state, idle_get_state_idx(cpu_rq(i)));
 
+	if (unlikely(state == INT_MAX))
+		return -EINVAL;
+
 	/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
 	state++;
 
@@ -5776,7 +5779,7 @@
  * The required scaling will be performed just one time, by the calling
  * functions, once we accumulated the contributons for all the SGs.
  */
-static void calc_sg_energy(struct energy_env *eenv)
+static int calc_sg_energy(struct energy_env *eenv)
 {
 	struct sched_group *sg = eenv->sg;
 	int busy_energy, idle_energy;
@@ -5805,6 +5808,11 @@
 
 		/* Compute IDLE energy */
 		idle_idx = group_idle_state(eenv, cpu_idx);
+		if (unlikely(idle_idx < 0))
+			return idle_idx;
+		if (idle_idx > sg->sge->nr_idle_states - 1)
+			idle_idx = sg->sge->nr_idle_states - 1;
+
 		idle_power = sg->sge->idle_states[idle_idx].power;
 
 		idle_energy   = SCHED_CAPACITY_SCALE - sg_util;
@@ -5813,6 +5821,7 @@
 		total_energy = busy_energy + idle_energy;
 		eenv->cpu[cpu_idx].energy += total_energy;
 	}
+	return 0;
 }
 
 /*
@@ -5874,7 +5883,8 @@
 				 * CPUs in the current visited SG.
 				 */
 				eenv->sg = sg;
-				calc_sg_energy(eenv);
+				if (calc_sg_energy(eenv))
+					return -EINVAL;
 
 				/* remove CPUs we have just visited */
 				if (!sd->child) {
@@ -6155,7 +6165,8 @@
 
 bool __cpu_overutilized(int cpu, int delta)
 {
-	return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+	return (capacity_orig_of(cpu) * 1024) <
+			((cpu_util(cpu) + delta) * capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
@@ -6260,7 +6271,7 @@
 
 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
 {
-	return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+	return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
 }
 
 /*
@@ -6918,6 +6929,7 @@
 	int target_cpu = -1;
 	int cpu, i;
 	unsigned int active_cpus_count = 0;
+	int isolated_candidate = -1;
 
 	*backup_cpu = -1;
 
@@ -6963,13 +6975,16 @@
 			unsigned long wake_util, new_util, min_capped_util;
 
 			cpumask_clear_cpu(i, &search_cpus);
+
+			if (!cpu_online(i) || cpu_isolated(i))
+				continue;
+
+			isolated_candidate = i;
+
 			if (avoid_prev_cpu && i == task_cpu(p))
 				continue;
 
-			if (!cpu_online(i) || cpu_isolated(i) || is_reserved(i))
-				continue;
-
-			if (walt_cpu_high_irqload(i))
+			if (walt_cpu_high_irqload(i) || is_reserved(i))
 				continue;
 
 			trace_sched_cpu_util(i);
@@ -7255,6 +7270,12 @@
 		? best_active_cpu
 		: best_idle_cpu;
 
+	if (target_cpu == -1 && cpu_isolated(task_cpu(p)) &&
+			isolated_candidate != -1) {
+		target_cpu = isolated_candidate;
+		fbt_env->avoid_prev_cpu = true;
+	}
+
 	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
 				     best_idle_cpu, best_active_cpu,
 				     target_cpu);
@@ -11799,7 +11820,7 @@
 	if (is_max_capacity_cpu(src_cpu))
 		return;
 
-	wc = ktime_get_ns();
+	wc = sched_ktime_clock();
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7a32e5a..44c767a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -839,6 +839,8 @@
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -1834,7 +1836,7 @@
 			if (avoid_prev_cpu && cpu == prev_cpu)
 				continue;
 
-			if (__cpu_overutilized(cpu, util + tutil))
+			if (__cpu_overutilized(cpu, tutil))
 				continue;
 
 			if (cpu_isolated(cpu))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 61aa3c7..e24df36 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1861,7 +1861,7 @@
 }
 
 #ifdef CONFIG_SCHED_WALT
-extern atomic64_t walt_irq_work_lastq_ws;
+extern u64 walt_load_reported_window;
 
 static inline unsigned long
 cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
@@ -1899,7 +1899,7 @@
 		walt_load->prev_window_util = util;
 		walt_load->nl = nl;
 		walt_load->pl = pl;
-		walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws);
+		walt_load->ws = walt_load_reported_window;
 	}
 
 	return (util >= capacity) ? capacity : util;
@@ -2234,8 +2234,13 @@
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_SCHED_WALT
+u64 sched_ktime_clock(void);
 void note_task_waking(struct task_struct *p, u64 wallclock);
 #else /* CONFIG_SCHED_WALT */
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
 #endif /* CONFIG_SCHED_WALT */
 
@@ -2276,7 +2281,7 @@
 	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
 					cpu_of(rq)));
 	if (data)
-		data->func(data, ktime_get_ns(), flags);
+		data->func(data, sched_ktime_clock(), flags);
 }
 
 static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index a2debf9..cba8a6b 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -19,6 +19,7 @@
  *             and Todd Kjos
  */
 
+#include <linux/syscore_ops.h>
 #include <linux/cpufreq.h>
 #include <linux/list_sort.h>
 #include <linux/jiffies.h>
@@ -41,14 +42,48 @@
 
 #define EARLY_DETECTION_DURATION 9500000
 
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
 static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
 static bool use_cycle_counter;
 DEFINE_MUTEX(cluster_lock);
-atomic64_t walt_irq_work_lastq_ws;
+static atomic64_t walt_irq_work_lastq_ws;
+u64 walt_load_reported_window;
 
 static struct irq_work walt_cpufreq_irq_work;
 static struct irq_work walt_migration_irq_work;
 
+u64 sched_ktime_clock(void)
+{
+	if (unlikely(sched_ktime_suspended))
+		return ktime_to_ns(ktime_last);
+	return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+	sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+	ktime_last = ktime_get();
+	sched_ktime_suspended = true;
+	return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+	.resume	= sched_resume,
+	.suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+	register_syscore_ops(&sched_syscore_ops);
+	return 0;
+}
+late_initcall(sched_init_ops);
+
 static void acquire_rq_locks_irqsave(const cpumask_t *cpus,
 				     unsigned long *flags)
 {
@@ -269,12 +304,7 @@
 	u64 old_window_start = rq->window_start;
 
 	delta = wallclock - rq->window_start;
-	/* If the MPM global timer is cleared, set delta as 0 to avoid kernel BUG happening */
-	if (delta < 0) {
-		delta = 0;
-		WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
-	}
-
+	BUG_ON(delta < 0);
 	if (delta < sched_ravg_window)
 		return old_window_start;
 
@@ -365,7 +395,7 @@
 	if (is_idle_task(curr)) {
 		/* We're here without rq->lock held, IRQ disabled */
 		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu, ktime_get_ns());
+		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
 		raw_spin_unlock(&rq->lock);
 	}
 }
@@ -426,7 +456,7 @@
 	cur_jiffies_ts = get_jiffies_64();
 
 	if (is_idle_task(curr))
-		update_task_ravg(curr, rq, IRQ_UPDATE, ktime_get_ns(),
+		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
 				 delta);
 
 	nr_windows = cur_jiffies_ts - rq->irqload_ts;
@@ -763,7 +793,7 @@
 	if (sched_disable_window_stats)
 		goto done;
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 
 	update_task_ravg(task_rq(p)->curr, task_rq(p),
 			 TASK_UPDATE,
@@ -866,6 +896,9 @@
 		rq->window_start = 1;
 		sync_cpu_available = 1;
 		atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
+		walt_load_reported_window =
+					atomic64_read(&walt_irq_work_lastq_ws);
+
 	} else {
 		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
 
@@ -2047,7 +2080,7 @@
 		return;
 	}
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
 	p->last_enqueued_ts = wallclock;
 	p->last_switch_out_ts = 0;
@@ -2449,7 +2482,7 @@
 
 				raw_spin_lock_irqsave(&rq->lock, flags);
 				update_task_ravg(rq->curr, rq, TASK_UPDATE,
-						 ktime_get_ns(), 0);
+						 sched_ktime_clock(), 0);
 				raw_spin_unlock_irqrestore(&rq->lock, flags);
 			}
 		}
@@ -2599,7 +2632,7 @@
 		return;
 	}
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 
 	/*
 	 * wakeup of two or more related tasks could race with each other and
@@ -2626,7 +2659,7 @@
 
 	grp->preferred_cluster = best_cluster(grp,
 			combined_demand, group_boost);
-	grp->last_update = ktime_get_ns();
+	grp->last_update = sched_ktime_clock();
 	trace_sched_set_preferred_cluster(grp, combined_demand);
 }
 
@@ -2650,7 +2683,7 @@
 	 * has passed since we last updated preference
 	 */
 	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
-		ktime_get_ns() - grp->last_update > sched_ravg_window)
+		sched_ktime_clock() - grp->last_update > sched_ravg_window)
 		return 1;
 
 	return 0;
@@ -3033,7 +3066,7 @@
 	bool new_task;
 	int i;
 
-	wallclock = ktime_get_ns();
+	wallclock = sched_ktime_clock();
 
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
@@ -3141,7 +3174,8 @@
 	for_each_cpu(cpu, cpu_possible_mask)
 		raw_spin_lock(&cpu_rq(cpu)->lock);
 
-	wc = ktime_get_ns();
+	wc = sched_ktime_clock();
+	walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);
 
 	for_each_sched_cluster(cluster) {
 		u64 aggr_grp_load = 0;
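
The WALT changes route every window timestamp through sched_ktime_clock(), which returns the last reading taken in the syscore suspend callback while the system is suspended and the live monotonic clock otherwise. A user-space sketch of that "clock that holds still across suspend" idea, with assumed names:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool     clock_suspended;
static uint64_t frozen_ns;

static uint64_t raw_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Callers see a clock that simply stops advancing while suspended. */
static uint64_t sched_clock_ns(void)
{
	return clock_suspended ? frozen_ns : raw_clock_ns();
}

static void clock_suspend(void)        /* syscore suspend callback analogue */
{
	frozen_ns = raw_clock_ns();
	clock_suspended = true;
}

static void clock_resume(void)         /* syscore resume callback analogue */
{
	clock_suspended = false;
}

int main(void)
{
	clock_suspend();
	uint64_t frozen = sched_clock_ns();
	clock_resume();

	return sched_clock_ns() >= frozen ? 0 : 1;
}
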
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index be06e7d4..cac925c 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -292,7 +292,7 @@
 
 static inline void walt_update_last_enqueue(struct task_struct *p)
 {
-	p->last_enqueued_ts = ktime_get_ns();
+	p->last_enqueued_ts = sched_ktime_clock();
 }
 extern void walt_rotate_work_init(void);
 extern void walt_rotation_checkpoint(int nr_big);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index af182a6..3975856 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -16,6 +16,8 @@
 #include <linux/atomic.h>
 #include <linux/audit.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 #include <linux/sched.h>
 #include <linux/seccomp.h>
 #include <linux/slab.h>
@@ -214,8 +216,11 @@
 	return true;
 }
 
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
 static inline void seccomp_assign_mode(struct task_struct *task,
-				       unsigned long seccomp_mode)
+				       unsigned long seccomp_mode,
+				       unsigned long flags)
 {
 	assert_spin_locked(&task->sighand->siglock);
 
@@ -225,6 +230,9 @@
 	 * filter) is set.
 	 */
 	smp_mb__before_atomic();
+	/* Assume default seccomp processes want spec flaw mitigation. */
+	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+		arch_seccomp_spec_mitigate(task);
 	set_tsk_thread_flag(task, TIF_SECCOMP);
 }
 
@@ -292,7 +300,7 @@
  * without dropping the locks.
  *
  */
-static inline void seccomp_sync_threads(void)
+static inline void seccomp_sync_threads(unsigned long flags)
 {
 	struct task_struct *thread, *caller;
 
@@ -333,7 +341,8 @@
 		 * allow one thread to transition the other.
 		 */
 		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
-			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
+					    flags);
 	}
 }
 
@@ -452,7 +461,7 @@
 
 	/* Now that the new filter is in place, synchronize to all threads. */
 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
-		seccomp_sync_threads();
+		seccomp_sync_threads(flags);
 
 	return 0;
 }
@@ -712,7 +721,7 @@
 #ifdef TIF_NOTSC
 	disable_TSC();
 #endif
-	seccomp_assign_mode(current, seccomp_mode);
+	seccomp_assign_mode(current, seccomp_mode, 0);
 	ret = 0;
 
 out:
@@ -770,7 +779,7 @@
 	/* Do not free the successfully attached filter. */
 	prepared = NULL;
 
-	seccomp_assign_mode(current, seccomp_mode);
+	seccomp_assign_mode(current, seccomp_mode, flags);
 out:
 	spin_unlock_irq(&current->sighand->siglock);
 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
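
The seccomp plumbing relies on a __weak arch_seccomp_spec_mitigate() stub so that architectures without speculation controls build unchanged, while architectures that have them provide a strong definition. A minimal sketch of the weak/strong override mechanism, assuming the GCC/Clang weak attribute and an invented hook name:

#include <stdio.h>

/* Generic default: no speculation control available on this architecture. */
__attribute__((weak)) int arch_spec_ctrl_set(unsigned long which,
					     unsigned long ctrl)
{
	(void)which;
	(void)ctrl;
	return -1;                     /* plays the role of -EINVAL here */
}

int main(void)
{
	/* Links fine even when no architecture-specific file overrides it. */
	printf("arch_spec_ctrl_set -> %d\n", arch_spec_ctrl_set(0, 0));
	return 0;
}

An architecture that does support the control simply ships a non-weak definition of the same symbol in its own object file, and the linker picks that one.
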
diff --git a/kernel/signal.c b/kernel/signal.c
index 7ebe236..4364e57 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1392,6 +1392,10 @@
 		return ret;
 	}
 
+	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
+	if (pid == INT_MIN)
+		return -ESRCH;
+
 	read_lock(&tasklist_lock);
 	if (pid != -1) {
 		ret = __kill_pgrp_info(sig, info,
@@ -2495,6 +2499,13 @@
 {
 	struct task_struct *tsk = current;
 
+	/*
+	 * In case the signal mask hasn't changed, there is nothing we need
+	 * to do. The current->blocked shouldn't be modified by another task.
+	 */
+	if (sigequalsets(&tsk->blocked, newset))
+		return;
+
 	spin_lock_irq(&tsk->sighand->siglock);
 	__set_task_blocked(tsk, newset);
 	spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/sys.c b/kernel/sys.c
index 4ccf5f0..af33cbf 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -55,6 +55,8 @@
 #include <linux/uidgid.h>
 #include <linux/cred.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kmsg_dump.h>
 /* Move somewhere else to avoid recompiling? */
 #include <generated/utsrelease.h>
@@ -1313,6 +1315,7 @@
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
 
+	resource = array_index_nospec(resource, RLIM_NLIMITS);
 	task_lock(current->group_leader);
 	x = current->signal->rlim[resource];
 	task_unlock(current->group_leader);
@@ -2221,6 +2224,17 @@
 }
 #endif
 
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+{
+	return -EINVAL;
+}
+
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
+				    unsigned long ctrl)
+{
+	return -EINVAL;
+}
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
@@ -2440,6 +2454,16 @@
 	case PR_GET_FP_MODE:
 		error = GET_FP_MODE(me);
 		break;
+	case PR_GET_SPECULATION_CTRL:
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_get(me, arg2);
+		break;
+	case PR_SET_SPECULATION_CTRL:
+		if (arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+		break;
 	case PR_SET_VMA:
 		error = prctl_set_vma(arg2, arg3, arg4, arg5);
 		break;
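
In sys.c the rlimit lookup clamps resource with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index rlim[] out of range. The user-space sketch below mirrors the branchless mask the kernel's generic helper computes; it is only an analogue: the real helper additionally hides the value from the optimizer, may be replaced by architecture code, and this version relies on arithmetic right shift of negative values as the kernel's supported compilers do.

#include <stdio.h>
#include <limits.h>

/* All-ones when idx < size, zero otherwise, computed without a branch. */
static unsigned long index_mask_nospec(unsigned long idx, unsigned long size)
{
	return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * CHAR_BIT - 1);
}

/* Out-of-range indices collapse to 0 even under misspeculation. */
static unsigned long index_nospec(unsigned long idx, unsigned long size)
{
	return idx & index_mask_nospec(idx, size);
}

int main(void)
{
	printf("%lu %lu\n", index_nospec(3, 16), index_nospec(20, 16));   /* 3 0 */
	return 0;
}
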
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f41c0e9..09a1611 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -398,22 +398,6 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-#ifdef CONFIG_SCHED_WALT
-	{
-		.procname	= "sched_use_walt_cpu_util",
-		.data		= &sysctl_sched_use_walt_cpu_util,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "sched_use_walt_task_util",
-		.data		= &sysctl_sched_use_walt_task_util,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif
 	{
 		.procname	= "sched_cstate_aware",
 		.data		= &sysctl_sched_cstate_aware,
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e8..22d7454 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -610,6 +610,14 @@
 	now = ktime_get();
 	/* Find all expired events */
 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
+		/*
+		 * Required for !SMP because for_each_cpu() unconditionally
+		 * reports CPU0 as set on UP kernels.
+		 */
+		if (!IS_ENABLED(CONFIG_SMP) &&
+		    cpumask_empty(tick_broadcast_oneshot_mask))
+			break;
+
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64) {
 			cpumask_set_cpu(cpu, tmpmask);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 063dd22..5534be1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,8 +120,9 @@
 				 struct ftrace_ops *op, struct pt_regs *regs);
 #else
 /* See comment below, where ftrace_ops_list_func is defined */
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
-#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs);
+#define ftrace_ops_list_func ftrace_ops_no_ops
 #endif
 
 /*
@@ -5309,7 +5310,8 @@
 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
 #else
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs)
 {
 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
@@ -5735,14 +5737,17 @@
 	fgraph_graph_time = enable;
 }
 
+void ftrace_graph_return_stub(struct ftrace_graph_ret *trace)
+{
+}
+
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
 	return 0;
 }
 
 /* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
-			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
@@ -5970,7 +5975,7 @@
 		goto out;
 
 	ftrace_graph_active--;
-	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_return = ftrace_graph_return_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
index 7a4c630..3a13ef3 100644
--- a/kernel/trace/ipc_logging_debug.c
+++ b/kernel/trace/ipc_logging_debug.c
@@ -78,17 +78,16 @@
 	struct dentry *d = file->f_path.dentry;
 	char *buffer;
 	int bsize;
-	int srcu_idx;
 	int r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
+	r = debugfs_file_get(d);
 	if (!r) {
 		ilctxt = file->private_data;
 		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	} else {
 		return r;
+	}
+	debugfs_file_put(d);
 
 	buffer = kmalloc(count, GFP_KERNEL);
 	if (!buffer) {
@@ -119,18 +118,17 @@
 	struct ipc_log_context *ilctxt;
 	struct dentry *d = file->f_path.dentry;
 	int bsize = 1;
-	int srcu_idx;
 	int r;
 	char local_buf[3];
 
-	r = debugfs_use_file_start(d, &srcu_idx);
+	r = debugfs_file_get(d);
 	if (!r) {
 		ilctxt = file->private_data;
 		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	} else {
 		return r;
+	}
+	debugfs_file_put(d);
 
 	if (copy_from_user(local_buf, buff, bsize)) {
 		count = -EFAULT;
@@ -172,17 +170,16 @@
 	struct ipc_log_context *ilctxt;
 	struct dentry *d = file->f_path.dentry;
 	int bsize = 2;
-	int srcu_idx;
 	int r;
 
-	r = debugfs_use_file_start(d, &srcu_idx);
+	r = debugfs_file_get(d);
 	if (!r) {
 		ilctxt = file->private_data;
 		r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
-	}
-	debugfs_use_file_finish(srcu_idx);
-	if (r)
+	} else {
 		return r;
+	}
+	debugfs_file_put(d);
 
 	bsize = simple_read_from_buffer(buff, count, ppos,
 				ilctxt->disabled?"1\n":"0\n", bsize);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 046abb0..de11b81 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3078,13 +3078,14 @@
 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 		return;
 
-	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
+	if (cpumask_available(iter->started) &&
+	    cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
 	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
 		return;
 
-	if (iter->started)
+	if (cpumask_available(iter->started))
 		cpumask_set_cpu(iter->cpu, iter->started);
 
 	/* Don't print started cpu buffer for the first entry of the trace */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0193f58..e35a411 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -322,6 +322,9 @@
 
 static int regex_match_front(char *str, struct regex *r, int len)
 {
+	if (len < r->len)
+		return 0;
+
 	if (strncmp(str, r->pattern, r->len) == 0)
 		return 1;
 	return 0;
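
The trace filter fix refuses a prefix match when the candidate field is shorter than the pattern, rather than letting strncmp() read past it. A small sketch of a length-guarded prefix check over a possibly non-NUL-terminated buffer (names are illustrative):

#include <stdio.h>
#include <string.h>

/* Return 1 iff the first buf_len bytes of buf start with pattern. */
static int match_front(const char *buf, int buf_len, const char *pattern)
{
	int plen = (int)strlen(pattern);

	if (buf_len < plen)
		return 0;               /* too short to contain the prefix */

	return memcmp(buf, pattern, plen) == 0;
}

int main(void)
{
	char field[4] = { 'a', 'b', 'c', 'd' };         /* not NUL-terminated */

	printf("%d %d\n", match_front(field, 4, "abc"),
			  match_front(field, 2, "abc"));   /* 1 0 */
	return 0;
}
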
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index bc6c6ec..83afbf2 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -149,6 +149,8 @@
 		return;
 
 	ret = strncpy_from_user(dst, src, maxlen);
+	if (ret == maxlen)
+		dst[--ret] = '\0';
 
 	if (ret < 0) {	/* Failed to fetch string */
 		((u8 *)get_rloc_data(dest))[0] = '\0';
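
strncpy_from_user() returns maxlen when the source does not fit, leaving the destination without a terminator; the uprobe fix drops the last byte and writes one. A user-space sketch of the same post-copy fix-up, with an assumed bounded-copy helper standing in for strncpy_from_user():

#include <stdio.h>

/*
 * Copy at most maxlen bytes and return how many were written; no terminator
 * is written when the source is maxlen bytes or longer (mirroring
 * strncpy_from_user() returning maxlen on truncation).
 */
static long bounded_copy(char *dst, const char *src, long maxlen)
{
	long n = 0;

	while (n < maxlen && src[n] != '\0') {
		dst[n] = src[n];
		n++;
	}
	if (n < maxlen)
		dst[n] = '\0';
	return n;
}

int main(void)
{
	char dst[4];
	long ret = bounded_copy(dst, "overflowing", (long)sizeof(dst));

	if (ret == (long)sizeof(dst))   /* truncated: make room for the NUL */
		dst[--ret] = '\0';

	printf("%ld \"%s\"\n", ret, dst);       /* 3 "ove" */
	return 0;
}
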
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d0639d9..c8e7cc0 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -202,7 +202,7 @@
 			lockdep_is_held(&tracepoints_mutex));
 	old = func_add(&tp_funcs, func, prio);
 	if (IS_ERR(old)) {
-		WARN_ON_ONCE(1);
+		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
 		return PTR_ERR(old);
 	}
 
@@ -235,7 +235,7 @@
 			lockdep_is_held(&tracepoints_mutex));
 	old = func_remove(&tp_funcs, func);
 	if (IS_ERR(old)) {
-		WARN_ON_ONCE(1);
+		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
 		return PTR_ERR(old);
 	}
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 04c2289..3806a97 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5276,7 +5276,7 @@
 
 	ret = device_register(&wq_dev->dev);
 	if (ret) {
-		kfree(wq_dev);
+		put_device(&wq_dev->dev);
 		wq->wq_dev = NULL;
 		return ret;
 	}
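
The workqueue error path switches from kfree() to put_device() because, once the embedded device/kobject has been initialised, its release callback owns the memory and freeing must go through the refcount. A stripped-down sketch of that ownership rule with a generic refcounted object (the kernel would recover the container with container_of() rather than a cast):

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refs;
	void (*release)(struct object *obj);
};

static void object_put(struct object *obj)
{
	if (--obj->refs == 0)
		obj->release(obj);      /* the only place the memory dies */
}

struct widget {
	struct object obj;              /* first member, so the cast below is valid */
	int payload;
};

static void widget_release(struct object *obj)
{
	printf("releasing widget\n");
	free((struct widget *)obj);
}

/* Pretend registration that fails; the refcount still owns the memory. */
static int object_register(struct object *obj)
{
	(void)obj;
	return -1;
}

int main(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->obj.refs = 1;
	w->obj.release = widget_release;

	if (object_register(&w->obj) != 0)
		object_put(&w->obj);    /* not free(w): let the refcount decide */
	return 0;
}
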
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 079d91a..640818c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -269,7 +269,6 @@
 
 config DEBUG_FS
 	bool "Debug Filesystem"
-	select SRCU
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into.  Enable this option to be able to read and
@@ -707,6 +706,19 @@
 
 source "lib/Kconfig.kasan"
 
+config DEBUG_REFCOUNT
+	bool "Verbose refcount checks"
+	help
+	  Say Y here if you want reference counters (refcount_t and kref) to
+	  generate WARNs on dubious usage. Without this refcount_t will still
+	  be a saturating counter and avoid Use-After-Free by turning it into
+	  a resource leak Denial-Of-Service.
+
+	  Use of this option will increase kernel text size but will alert the
+	  admin of potential abuse.
+
+	  If in doubt, say "N".
+
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a..34f8472 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@
 
 		/* be noisy on error issues */
 		if (error == -EEXIST)
-			WARN(1, "%s failed for %s with "
-			     "-EEXIST, don't try to register things with "
-			     "the same name in the same directory.\n",
-			     __func__, kobject_name(kobj));
+			pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+			       __func__, kobject_name(kobj));
 		else
-			WARN(1, "%s failed for %s (error: %d parent: %s)\n",
-			     __func__, kobject_name(kobj), error,
-			     parent ? kobject_name(parent) : "'none'");
+			pr_err("%s failed for %s (error: %d parent: %s)\n",
+			       __func__, kobject_name(kobj), error,
+			       parent ? kobject_name(parent) : "'none'");
 	} else
 		kobj->state_in_sysfs = 1;
 
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 98da752..1586dfd 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -83,6 +83,7 @@
 		__u32 result;
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
+	int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
 	__u8 frag_data[MAX_DATA];
 };
 
@@ -1900,7 +1901,9 @@
 		},
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: div_k_0",
@@ -1910,7 +1913,9 @@
 		},
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: unknown insn",
@@ -1921,7 +1926,9 @@
 		},
 		CLASSIC | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: out of range spill/fill",
@@ -1931,7 +1938,9 @@
 		},
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
-		{ }
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"JUMPS + HOLES",
@@ -2023,6 +2032,8 @@
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{
 		"check: LDX + RET X",
@@ -2033,6 +2044,8 @@
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{	/* Mainly checking JIT here. */
 		"M[]: alt STX + LDX",
@@ -2207,6 +2220,8 @@
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
 	},
 	{	/* Passes checker but fails during runtime. */
 		"LD [SKF_AD_OFF-1]",
@@ -4803,6 +4818,7 @@
 		{ },
 		{ },
 		.fill_helper = bpf_fill_maxinsns4,
+		.expected_errcode = -EINVAL,
 	},
 	{	/* Mainly checking JIT here. */
 		"BPF_MAXINSNS: Very long jump",
@@ -4858,10 +4874,15 @@
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
 		CLASSIC | FLAG_NO_DATA,
+#endif
 		{ },
 		{ { 0, 0xababcbac } },
 		.fill_helper = bpf_fill_maxinsns11,
+		.expected_errcode = -ENOTSUPP,
 	},
 	{
 		"BPF_MAXINSNS: ld_abs+get_processor_id",
@@ -5632,7 +5653,7 @@
 
 		*err = bpf_prog_create(&fp, &fprog);
 		if (tests[which].aux & FLAG_EXPECTED_FAIL) {
-			if (*err == -EINVAL) {
+			if (*err == tests[which].expected_errcode) {
 				pr_cont("PASS\n");
 				/* Verifier rejected filter as expected. */
 				*err = 0;
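
test_bpf stops hard-coding -EINVAL and instead compares against a per-case expected_errcode, since CONFIG_BPF_JIT_ALWAYS_ON configurations reject some classic programs with -ENOTSUPP. A generic sketch of that table-driven pattern with assumed names:

#include <stdio.h>
#include <errno.h>

struct test_case {
	const char *name;
	int input;
	int expected_errcode;   /* what the routine under test should return */
};

/* Routine under test: only even inputs are accepted in this sketch. */
static int routine(int input)
{
	return (input % 2) ? -EINVAL : 0;
}

int main(void)
{
	static const struct test_case tests[] = {
		{ "accepts even", 2, 0       },
		{ "rejects odd",  3, -EINVAL },
	};
	int failed = 0;

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		int err = routine(tests[i].input);

		if (err != tests[i].expected_errcode) {
			printf("FAIL %s: got %d, want %d\n",
			       tests[i].name, err, tests[i].expected_errcode);
			failed++;
		}
	}
	printf("%d failures\n", failed);
	return failed != 0;
}
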
diff --git a/mm/Kconfig b/mm/Kconfig
index 3363a70..051f7bc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -675,6 +675,7 @@
 	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	depends on NO_BOOTMEM && MEMORY_HOTPLUG
 	depends on !FLATMEM
+	depends on !NEED_PER_CPU_KM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 6c707bf..27fc9ad 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -126,7 +126,15 @@
 		 */
 		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
 		end_index = (endbyte >> PAGE_SHIFT);
-		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+		/*
+		 * The page at end_index will be inclusively discarded by
+		 * invalidate_mapping_pages(), so subtracting 1 from
+		 * end_index means we will skip the last page.  But if endbyte
+		 * is page aligned or is at the end of file, we should not skip
+		 * that page - discarding the last page is safe enough.
+		 */
+		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
+				endbyte != inode->i_size - 1) {
 			/* First page is tricky as 0 - 1 = -1, but pgoff_t
 			 * is unsigned, so the end_index >= start_index
 			 * check below would be true and we'll discard the whole
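
The fadvise fix only excludes the trailing partial page when endbyte does not already reach a page boundary or the end of the file; otherwise the last page may be discarded. A worked user-space example of the index arithmetic, assuming 4 KiB pages and an invented file size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t offset = 100;          /* range start (bytes)          */
	uint64_t endbyte = 8191;        /* inclusive range end (bytes)  */
	uint64_t i_size = 8192;         /* assumed file size            */

	/* Round the start up and the end down to whole pages. */
	uint64_t start_index = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	uint64_t end_index = endbyte >> PAGE_SHIFT;

	/*
	 * Skip the trailing partial page unless endbyte already sits on a
	 * page boundary or is the last byte of the file.
	 */
	if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK && endbyte != i_size - 1) {
		if (end_index == 0)
			return 0;       /* nothing beyond the first partial page */
		end_index--;
	}

	printf("invalidate pages [%llu, %llu]\n",
	       (unsigned long long)start_index,
	       (unsigned long long)end_index);
	return 0;
}
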
diff --git a/mm/gup.c b/mm/gup.c
index 6c3b4e8..be4ccdd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,6 +430,9 @@
 	if (vm_flags & (VM_IO | VM_PFNMAP))
 		return -EFAULT;
 
+	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+		return -EFAULT;
+
 	if (write) {
 		if (!(vm_flags & VM_WRITE)) {
 			if (!(gup_flags & FOLL_FORCE))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e2982ea..7243728 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -542,7 +542,8 @@
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
+	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
+				  true)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1060,7 +1061,7 @@
 	}
 
 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
-					huge_gfp, &memcg, true))) {
+				huge_gfp | __GFP_NORETRY, &memcg, true))) {
 		put_page(new_page);
 		split_huge_pmd(vma, fe->pmd, fe->address);
 		if (page)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index ab47d93..7d78b5f 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -860,5 +860,5 @@
 	return 0;
 }
 
-module_init(kasan_memhotplug_init);
+core_initcall(kasan_memhotplug_init);
 #endif
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 898eb26..1df37ee 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -963,7 +963,9 @@
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+	/* Do not oom kill for khugepaged charges */
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
+					   &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
@@ -1323,7 +1325,9 @@
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+	/* Do not oom kill for khugepaged charges */
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
+					   &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}
@@ -1678,10 +1682,14 @@
 	spin_unlock(&khugepaged_mm_lock);
 
 	mm = mm_slot->mm;
-	down_read(&mm->mmap_sem);
-	if (unlikely(khugepaged_test_exit(mm)))
-		vma = NULL;
-	else
+	/*
+	 * Don't wait for semaphore (to avoid long wait times).  Just move to
+	 * the next mm on the list.
+	 */
+	vma = NULL;
+	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+		goto breakouterloop_mmap_sem;
+	if (likely(!khugepaged_test_exit(mm)))
 		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
diff --git a/mm/ksm.c b/mm/ksm.c
index 927aa34..8450f9b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1487,8 +1487,22 @@
 	tree_rmap_item =
 		unstable_tree_search_insert(rmap_item, page, &tree_page);
 	if (tree_rmap_item) {
+		bool split;
+
 		kpage = try_to_merge_two_pages(rmap_item, page,
 						tree_rmap_item, tree_page);
+		/*
+		 * If both pages we tried to merge belong to the same compound
+		 * page, then we actually ended up increasing the reference
+		 * count of the same compound page twice, and split_huge_page
+		 * failed.
+		 * Here we set a flag if that happened, and we use it later to
+		 * try split_huge_page again. Since we call put_page right
+		 * afterwards, the reference count will be correct and
+		 * split_huge_page should succeed.
+		 */
+		split = PageTransCompound(page)
+			&& compound_head(page) == compound_head(tree_page);
 		put_page(tree_page);
 		if (kpage) {
 			/*
@@ -1513,6 +1527,20 @@
 				break_cow(tree_rmap_item);
 				break_cow(rmap_item);
 			}
+		} else if (split) {
+			/*
+			 * We are here if we tried to merge two pages and
+			 * failed because they both belonged to the same
+			 * compound page. We will split the page now, but no
+			 * merging will take place.
+			 * We do not want to add the cost of a full lock; if
+			 * the page is locked, it is better to skip it and
+			 * perhaps try again later.
+			 */
+			if (!trylock_page(page))
+				return;
+			split_huge_page(page);
+			unlock_page(page);
 		}
 	}
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9547583..d0adeef 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1265,6 +1265,7 @@
 		     unsigned long maxnode)
 {
 	unsigned long k;
+	unsigned long t;
 	unsigned long nlongs;
 	unsigned long endmask;
 
@@ -1281,13 +1282,19 @@
 	else
 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
 
-	/* When the user specified more nodes than supported just check
-	   if the non supported part is all zero. */
+	/*
+	 * When the user specified more nodes than supported just check
+	 * if the non supported part is all zero.
+	 *
+	 * If maxnode covers more longs than MAX_NUMNODES, check
+	 * the bits in that area first, and then go on to check the
+	 * remaining bits at or above MAX_NUMNODES.
+	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
+	 */
 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
 		if (nlongs > PAGE_SIZE/sizeof(long))
 			return -EINVAL;
 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
-			unsigned long t;
 			if (get_user(t, nmask + k))
 				return -EFAULT;
 			if (k == nlongs - 1) {
@@ -1300,6 +1307,16 @@
 		endmask = ~0UL;
 	}
 
+	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
+		unsigned long valid_mask = endmask;
+
+		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
+		if (get_user(t, nmask + nlongs - 1))
+			return -EFAULT;
+		if (t & valid_mask)
+			return -EINVAL;
+	}
+
 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
 		return -EFAULT;
 	nodes_addr(*nodes)[nlongs-1] &= endmask;
@@ -1426,10 +1443,14 @@
 		goto out_put;
 	}
 
-	if (!nodes_subset(*new, node_states[N_MEMORY])) {
-		err = -EINVAL;
+	task_nodes = cpuset_mems_allowed(current);
+	nodes_and(*new, *new, task_nodes);
+	if (nodes_empty(*new))
 		goto out_put;
-	}
+
+	nodes_and(*new, *new, node_states[N_MEMORY]);
+	if (nodes_empty(*new))
+		goto out_put;
 
 	err = security_task_movememory(task);
 	if (err)
@@ -2139,6 +2160,9 @@
 	case MPOL_INTERLEAVE:
 		return !!nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
+		/* a's ->flags is the same as b's */
+		if (a->flags & MPOL_F_LOCAL)
+			return true;
 		return a->v.preferred_node == b->v.preferred_node;
 	default:
 		BUG();
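
get_nodes() now also rejects masks that set bits at or above MAX_NUMNODES inside the last supported long, not only in the longs beyond it. A small sketch of that "no bits above the supported maximum" check on a word-based bitmap, with assumed sizes:

#include <stdio.h>
#include <stdbool.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define MAX_NODES     36        /* assumed limit, deliberately not a multiple of BITS_PER_LONG */

/* Reject a mask that sets any bit in [MAX_NODES, nbits). */
static bool mask_is_valid(const unsigned long *mask, unsigned long nbits)
{
	unsigned long nlongs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long max_longs = (MAX_NODES + BITS_PER_LONG - 1) / BITS_PER_LONG;

	/* Whole longs beyond the supported range must be zero. */
	for (unsigned long k = max_longs; k < nlongs; k++)
		if (mask[k])
			return false;

	/* High bits inside the last supported long must be zero too. */
	if (MAX_NODES % BITS_PER_LONG) {
		unsigned long valid = (1UL << (MAX_NODES % BITS_PER_LONG)) - 1;

		if (mask[max_longs - 1] & ~valid)
			return false;
	}
	return true;
}

int main(void)
{
	unsigned long ok[2]  = { 0x7UL, 0UL };
	unsigned long bad[2] = { ~0UL, ~0UL };

	printf("%d %d\n",
	       mask_is_valid(ok, 2 * BITS_PER_LONG),
	       mask_is_valid(bad, 2 * BITS_PER_LONG));   /* 1 0 */
	return 0;
}
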
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b74f30e..1345072 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6851,9 +6851,10 @@
 			    mult_frac(zone->managed_pages,
 				      watermark_scale_factor, 10000));
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + low + min;
-		zone->watermark[WMARK_HIGH] =
-					min_wmark_pages(zone) + low + min * 2;
+		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+					low + min;
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+					low + min * 2;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
diff --git a/mm/percpu.c b/mm/percpu.c
index f014ceb..3794cfc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -70,6 +70,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/kmemleak.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
diff --git a/mm/swap.c b/mm/swap.c
index 4dcf852..6f22754 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -208,9 +208,10 @@
 {
 	int *pgmoved = arg;
 
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &lruvec->lists[lru]);
+	if (PageLRU(page) && !PageUnevictable(page)) {
+		del_page_from_lru_list(page, lruvec, page_lru(page));
+		ClearPageActive(page);
+		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
 		(*pgmoved)++;
 	}
 }
@@ -234,7 +235,7 @@
  */
 void rotate_reclaimable_page(struct page *page)
 {
-	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+	if (!PageLocked(page) && !PageDirty(page) &&
 	    !PageUnevictable(page) && PageLRU(page)) {
 		struct pagevec *pvec;
 		unsigned long flags;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9cf2595..7b439c9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2313,6 +2313,10 @@
 	maxpages = swp_offset(pte_to_swp_entry(
 			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
 	last_page = swap_header->info.last_page;
+	if (!last_page) {
+		pr_warn("Empty swap-file\n");
+		return 0;
+	}
 	if (last_page > maxpages) {
 		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
 			maxpages << (PAGE_SHIFT - 10),
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5b94d61..4daac9a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,6 +87,7 @@
 	/* The highest zone to isolate pages for reclaim from */
 	enum zone_type reclaim_idx;
 
+	/* Writepage batching in laptop mode; RECLAIM_WRITE */
 	unsigned int may_writepage:1;
 
 	/* Can mapped pages be reclaimed? */
@@ -1057,6 +1058,15 @@
 		 *    throttling so we could easily OOM just because too many
 		 *    pages are in writeback and there is nothing else to
 		 *    reclaim. Wait for the writeback to complete.
+		 *
+		 * In cases 1) and 2) we activate the pages to get them out of
+		 * the way while we continue scanning for clean pages on the
+		 * inactive list and refilling from the active list. The
+		 * observation here is that waiting for disk writes is more
+		 * expensive than potentially causing reloads down the line.
+		 * Since they're marked for immediate reclaim, they won't put
+		 * memory pressure on the cache working set any longer than it
+		 * takes to write them to disk.
 		 */
 		if (PageWriteback(page)) {
 			/* Case 1 above */
@@ -1064,7 +1074,7 @@
 			    PageReclaim(page) &&
 			    (pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
 				nr_immediate++;
-				goto keep_locked;
+				goto activate_locked;
 
 			/* Case 2 above */
 			} else if (sane_reclaim(sc) ||
@@ -1082,7 +1092,7 @@
 				 */
 				SetPageReclaim(page);
 				nr_writeback++;
-				goto keep_locked;
+				goto activate_locked;
 
 			/* Case 3 above */
 			} else {
@@ -1153,14 +1163,18 @@
 
 		if (PageDirty(page)) {
 			/*
-			 * Only kswapd can writeback filesystem pages to
-			 * avoid risk of stack overflow but only writeback
-			 * if many dirty pages have been encountered.
+			 * Only kswapd can writeback filesystem pages
+			 * to avoid risk of stack overflow. But avoid
+			 * injecting inefficient single-page IO into
+			 * flusher writeback as much as possible: only
+			 * write pages when we've encountered many
+			 * dirty pages, and when we've already scanned
+			 * the rest of the LRU for clean pages and see
+			 * the same dirty pages again (PageReclaim).
 			 */
 			if (page_is_file_cache(page) &&
-					(!current_is_kswapd() ||
-					(pgdat &&
-					 !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
+			    (!current_is_kswapd() || !PageReclaim(page) ||
+			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -1170,7 +1184,7 @@
 				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
 				SetPageReclaim(page);
 
-				goto keep_locked;
+				goto activate_locked;
 			}
 
 			if (references == PAGEREF_RECLAIM_CLEAN)
@@ -1416,31 +1430,34 @@
 	 * wants to isolate pages it will be able to operate on without
 	 * blocking - clean pages for the most part.
 	 *
-	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
-	 * is used by reclaim when it is cannot write to backing storage
-	 *
 	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
 	 * that it is possible to migrate without blocking
 	 */
-	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+	if (mode & ISOLATE_ASYNC_MIGRATE) {
 		/* All the caller can do on PageWriteback is block */
 		if (PageWriteback(page))
 			return ret;
 
 		if (PageDirty(page)) {
 			struct address_space *mapping;
-
-			/* ISOLATE_CLEAN means only clean pages */
-			if (mode & ISOLATE_CLEAN)
-				return ret;
+			bool migrate_dirty;
 
 			/*
 			 * Only pages without mappings or that have a
 			 * ->migratepage callback are possible to migrate
-			 * without blocking
+			 * without blocking. However, we can be racing with
+			 * truncation so it's necessary to lock the page
+			 * to stabilise the mapping as truncation holds
+			 * the page lock until after the page is removed
+			 * from the page cache.
 			 */
+			if (!trylock_page(page))
+				return ret;
+
 			mapping = page_mapping(page);
-			if (mapping && !mapping->a_ops->migratepage)
+			migrate_dirty = mapping && mapping->a_ops->migratepage;
+			unlock_page(page);
+			if (!migrate_dirty)
 				return ret;
 		}
 	}
@@ -1831,8 +1848,6 @@
 
 	if (!sc->may_unmap)
 		isolate_mode |= ISOLATE_UNMAPPED;
-	if (!sc->may_writepage)
-		isolate_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&pgdat->lru_lock);
 
@@ -1894,6 +1909,20 @@
 		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
 	/*
+	 * If dirty pages are scanned that are not queued for IO, it
+	 * implies that flushers are not doing their job. This can
+	 * happen when memory pressure pushes dirty pages to the end of
+	 * the LRU before the dirty limits are breached and the dirty
+	 * data has expired. It can also happen when the proportion of
+	 * dirty pages grows not through writes but through memory
+	 * pressure reclaiming all the clean cache. And in some cases,
+	 * the flushers simply cannot keep up with the allocation
+	 * rate. Nudge the flusher threads in case they are asleep.
+	 */
+	if (nr_unqueued_dirty == nr_taken)
+		wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+
+	/*
 	 * Legacy memcg will stall in page writeback so avoid forcibly
 	 * stalling here.
 	 */
@@ -1905,12 +1934,7 @@
 		if (nr_dirty && nr_dirty == nr_congested)
 			set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
-		/*
-		 * If dirty pages are scanned that are not queued for IO, it
-		 * implies that flushers are not keeping up. In this case, flag
-		 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
-		 * reclaim context.
-		 */
+		/* Allow kswapd to start writing pages during reclaim. */
 		if (nr_unqueued_dirty == nr_taken)
 			set_bit(PGDAT_DIRTY, &pgdat->flags);
 
@@ -2020,8 +2044,6 @@
 
 	if (!sc->may_unmap)
 		isolate_mode |= ISOLATE_UNMAPPED;
-	if (!sc->may_writepage)
-		isolate_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&pgdat->lru_lock);
 
@@ -2823,8 +2845,6 @@
 					  struct scan_control *sc)
 {
 	int initial_priority = sc->priority;
-	unsigned long total_scanned = 0;
-	unsigned long writeback_threshold;
 retry:
 	delayacct_freepages_start();
 
@@ -2837,7 +2857,6 @@
 		sc->nr_scanned = 0;
 		shrink_zones(zonelist, sc);
 
-		total_scanned += sc->nr_scanned;
 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
 			break;
 
@@ -2850,20 +2869,6 @@
 		 */
 		if (sc->priority < DEF_PRIORITY - 2)
 			sc->may_writepage = 1;
-
-		/*
-		 * Try to write back as many pages as we just scanned.  This
-		 * tends to cause slow streaming writers to write data to the
-		 * disk smoothly, at the dirtying rate, which is nice.   But
-		 * that's undesirable in laptop mode, where we *want* lumpy
-		 * writeout.  So in laptop mode, write out the whole world.
-		 */
-		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
-		if (total_scanned > writeback_threshold) {
-			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
-						WB_REASON_TRY_TO_FREE_PAGES);
-			sc->may_writepage = 1;
-		}
 	} while (--sc->priority >= 0);
 
 	delayacct_freepages_end();
@@ -3925,7 +3930,13 @@
  */
 int page_evictable(struct page *page)
 {
-	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+	int ret;
+
+	/* Prevent address_space of inode and swap cache from being freed */
+	rcu_read_lock();
+	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+	rcu_read_unlock();
+	return ret;
 }
 
 #ifdef CONFIG_SHMEM
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5d26938..1e84c52 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -41,6 +41,9 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
 #include "lec.h"
 #include "lec_arpc.h"
 #include "resources.h"
@@ -697,8 +700,10 @@
 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
 	if (bytes_left != 0)
 		pr_info("copy from user failed for %d bytes\n", bytes_left);
-	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
-	    !dev_lec[ioc_data.dev_num])
+	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+		return -EINVAL;
+	ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+	if (!dev_lec[ioc_data.dev_num])
 		return -EINVAL;
 	vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
 	if (!vpriv)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index e2d18d0..946f1c2 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2705,7 +2705,7 @@
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;
 
 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index e79f6f0..ed4ddf2 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -920,7 +920,7 @@
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;
 
 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5419b12..582e276 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2149,22 +2149,25 @@
 {
 	struct batadv_bla_claim *claim;
 	int idx = 0;
+	int ret = 0;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(claim, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_claim_dump_entry(msg, portid, seq,
-						primary_if, claim)) {
+
+		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+						  primary_if, claim);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}
 
-	*idx_skip = idx;
+	*idx_skip = 0;
 unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }
 
 /**
@@ -2379,22 +2382,25 @@
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int idx = 0;
+	int ret = 0;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
-						   primary_if, backbone_gw)) {
+
+		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+						     primary_if, backbone_gw);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}
 
-	*idx_skip = idx;
+	*idx_skip = 0;
 unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index e257efd..df7c6a0 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -391,7 +391,7 @@
 		   batadv_arp_hw_src(skb, hdr_size), &ip_src,
 		   batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
 
-	if (hdr_size == 0)
+	if (hdr_size < sizeof(struct batadv_unicast_packet))
 		return;
 
 	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0934730..57215e3 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -276,7 +276,8 @@
 	/* Move the existing MAC header to just before the payload. (Override
 	 * the fragment header.)
 	 */
-	skb_pull_rcsum(skb_out, hdr_size);
+	skb_pull(skb_out, hdr_size);
+	skb_out->ip_summed = CHECKSUM_NONE;
 	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
 	skb_set_mac_header(skb_out, -ETH_HLEN);
 	skb_reset_network_header(skb_out);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index de055d6..ed9aaf3 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -715,6 +715,9 @@
 
 	vid = batadv_get_vid(skb, 0);
 
+	if (is_multicast_ether_addr(ethhdr->h_dest))
+		goto out;
+
 	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
 						 ethhdr->h_dest, vid);
 	if (!orig_dst_node)
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 13661f4..5a2aac1 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -527,8 +527,8 @@
 		bat_priv->mcast.enabled = true;
 	}
 
-	return !(mcast_data.flags &
-		 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
+	return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+		 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
 }
 
 /**
@@ -769,8 +769,8 @@
 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 			      struct ethhdr *ethhdr)
 {
-	return batadv_transtable_search(bat_priv, ethhdr->h_source,
-					ethhdr->h_dest, BATADV_NO_FLAGS);
+	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
+					BATADV_NO_FLAGS);
 }
 
 /**
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7e8dc64..8b98609 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -724,6 +724,7 @@
 /**
  * batadv_reroute_unicast_packet - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
  * @vid: VLAN identifier
@@ -735,7 +736,7 @@
  * Return: true if the packet header has been updated, false otherwise
  */
 static bool
-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			      struct batadv_unicast_packet *unicast_packet,
 			      u8 *dst_addr, unsigned short vid)
 {
@@ -764,8 +765,10 @@
 	}
 
 	/* update the packet header */
+	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 	ether_addr_copy(unicast_packet->dest, orig_addr);
 	unicast_packet->ttvn = orig_ttvn;
+	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
 	ret = true;
 out:
@@ -806,7 +809,7 @@
 	 * the packet to
 	 */
 	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
-		if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+		if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
 						  ethhdr->h_dest, vid))
 			batadv_dbg_ratelimited(BATADV_DBG_TT,
 					       bat_priv,
@@ -852,7 +855,7 @@
 	 * destination can possibly be updated and forwarded towards the new
 	 * target host
 	 */
-	if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+	if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
 					  ethhdr->h_dest, vid)) {
 		batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
 				       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -875,12 +878,14 @@
 	if (!primary_if)
 		return false;
 
+	/* update the packet header */
+	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 	ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+	unicast_packet->ttvn = curr_ttvn;
+	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
 	batadv_hardif_put(primary_if);
 
-	unicast_packet->ttvn = curr_ttvn;
-
 	return true;
 }
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 49e16b6..84c1b38 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,13 +448,7 @@
 
 	/* skb->dev & skb->pkt_type are set here */
 	skb->protocol = eth_type_trans(skb, soft_iface);
-
-	/* should not be necessary anymore as we use skb_pull_rcsum()
-	 * TODO: please verify this and remove this TODO
-	 * -- Dec 21st 2009, Simon Wunderlich
-	 */
-
-	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
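The three batman-adv hunks above (fragmentation.c, routing.c, soft-interface.c) all deal with keeping skb->csum valid on CHECKSUM_COMPLETE packets when header bytes are pulled or rewritten. What follows is a minimal userspace sketch of the underlying one's-complement arithmetic only, not kernel API; csum16() and the 6-byte "header" are illustrative. It shows why subtracting the old bytes' contribution and adding the new bytes' contribution (the skb_postpull_rcsum()/skb_postpush_rcsum() pattern) gives the same folded sum as recomputing over the rewritten buffer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-bit one's-complement sum over a byte buffer (even length assumed). */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[32];
	uint16_t full, hdr_old, hdr_new;
	uint32_t adj;
	size_t i;

	memcpy(pkt, "\x01\x02\x03\x04\x05\x06", 6);	/* old "header" bytes */
	for (i = 6; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)i;			/* arbitrary payload */

	full = csum16(pkt, sizeof(pkt));		/* like CHECKSUM_COMPLETE */
	hdr_old = csum16(pkt, 6);			/* "postpull" the old bytes */

	memcpy(pkt, "\xaa\xbb\xcc\xdd\xee\xff", 6);	/* rewrite the header */
	hdr_new = csum16(pkt, 6);			/* "postpush" the new bytes */

	/* subtract old contribution (add its complement), add new one, fold */
	adj = (uint32_t)full + (uint16_t)~hdr_old + hdr_new;
	while (adj >> 16)
		adj = (adj & 0xffff) + (adj >> 16);

	printf("recomputed=0x%04x adjusted=0x%04x\n",
	       (unsigned int)csum16(pkt, sizeof(pkt)), (unsigned int)adj);
	return 0;
}

Both printed values should agree, up to the usual 0x0000/0xffff equivalence of one's-complement zero. When the checksum cannot be kept consistent cheaply, as in the fragmentation.c hunk where the MAC header is moved over the fragment header, falling back to CHECKSUM_NONE is the conservative choice.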
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1fc0764..1811f8e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -431,8 +431,8 @@
 		del_timer(&session->timer);
 }
 
-static void hidp_process_report(struct hidp_session *session,
-				int type, const u8 *data, int len, int intr)
+static void hidp_process_report(struct hidp_session *session, int type,
+				const u8 *data, unsigned int len, int intr)
 {
 	if (len > HID_MAX_BUFFER_SIZE)
 		len = HID_MAX_BUFFER_SIZE;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 9218931..f57de0a 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -504,8 +504,8 @@
 	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
 		return -ELOOP;
 
-	/* Device is already being bridged */
-	if (br_port_exists(dev))
+	/* Device has master upper dev */
+	if (netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
 	/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5a89a4a..0a9222e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1625,7 +1625,8 @@
 	int off = ebt_compat_match_offset(match, m->match_size);
 	compat_uint_t msize = m->match_size - off;
 
-	BUG_ON(off >= m->match_size);
+	if (WARN_ON(off >= m->match_size))
+		return -EINVAL;
 
 	if (copy_to_user(cm->u.name, match->name,
 	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1652,7 +1653,8 @@
 	int off = xt_compat_target_offset(target);
 	compat_uint_t tsize = t->target_size - off;
 
-	BUG_ON(off >= t->target_size);
+	if (WARN_ON(off >= t->target_size))
+		return -EINVAL;
 
 	if (copy_to_user(cm->u.name, target->name,
 	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1880,7 +1882,8 @@
 	if (state->buf_kern_start == NULL)
 		goto count_only;
 
-	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+	if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
+		return -EINVAL;
 
 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
 
@@ -1893,7 +1896,8 @@
 {
 	char *b = state->buf_kern_start;
 
-	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+	if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
+		return -EINVAL;
 
 	if (b != NULL && sz > 0)
 		memset(b + state->buf_kern_offset, 0, sz);
@@ -1970,8 +1974,10 @@
 	pad = XT_ALIGN(size_kern) - size_kern;
 
 	if (pad > 0 && dst) {
-		BUG_ON(state->buf_kern_len <= pad);
-		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+		if (WARN_ON(state->buf_kern_len <= pad))
+			return -EINVAL;
+		if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
+			return -EINVAL;
 		memset(dst + size_kern, 0, pad);
 	}
 	return off + match_size;
@@ -2021,7 +2027,8 @@
 		if (ret < 0)
 			return ret;
 
-		BUG_ON(ret < match32->match_size);
+		if (WARN_ON(ret < match32->match_size))
+			return -EINVAL;
 		growth += ret - match32->match_size;
 		growth += ebt_compat_entry_padsize();
 
@@ -2090,8 +2097,12 @@
 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
 	 */
 	for (i = 0; i < 4 ; ++i) {
-		if (offsets[i] >= *total)
+		if (offsets[i] > *total)
 			return -EINVAL;
+
+		if (i < 3 && offsets[i] == *total)
+			return -EINVAL;
+
 		if (i == 0)
 			continue;
 		if (offsets[i-1] > offsets[i])
@@ -2130,7 +2141,8 @@
 
 	startoff = state->buf_user_offset - startoff;
 
-	BUG_ON(*total < startoff);
+	if (WARN_ON(*total < startoff))
+		return -EINVAL;
 	*total -= startoff;
 	return 0;
 }
@@ -2257,7 +2269,8 @@
 	state.buf_kern_len = size64;
 
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-	BUG_ON(ret < 0);	/* parses same data again */
+	if (WARN_ON(ret < 0))
+		goto out_unlock;
 
 	vfree(entries_tmp);
 	tmp.entries_size = size64;
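The ebtables hunks above replace BUG_ON() with WARN_ON() plus an -EINVAL return, because the offending conditions can be reached from malformed compat requests and should not crash the machine. A small userspace sketch of that pattern follows, assuming a stand-in WARN_ON macro (the kernel's version is more elaborate) and an illustrative parse_match() helper.

#include <errno.h>
#include <stdio.h>

#define WARN_ON(cond) \
	((cond) ? (fprintf(stderr, "warning: %s\n", #cond), 1) : 0)

static int parse_match(unsigned int off, unsigned int match_size)
{
	if (WARN_ON(off >= match_size))	/* was: BUG_ON(off >= match_size) */
		return -EINVAL;		/* caller unwinds instead of crashing */
	/* ... copy/translate the match here ... */
	return 0;
}

int main(void)
{
	printf("%d\n", parse_match(8, 4));	/* warns, returns -EINVAL */
	printf("%d\n", parse_match(2, 4));	/* returns 0 */
	return 0;
}

The point is purely structural: the caller gets a chance to unwind and report the error instead of the whole system stopping on attacker-influenced input.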
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 25a30be..98ea28d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2512,6 +2512,11 @@
 	int ret = 1;
 
 	dout("try_write start %p state %lu\n", con, con->state);
+	if (con->state != CON_STATE_PREOPEN &&
+	    con->state != CON_STATE_CONNECTING &&
+	    con->state != CON_STATE_NEGOTIATING &&
+	    con->state != CON_STATE_OPEN)
+		return 0;
 
 more:
 	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2537,6 +2542,8 @@
 	}
 
 more_kvec:
+	BUG_ON(!con->sock);
+
 	/* kvec data queued? */
 	if (con->out_kvec_left) {
 		ret = write_partial_kvec(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a8effc8..5004810 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -209,6 +209,14 @@
 	__open_session(monc);
 }
 
+static void un_backoff(struct ceph_mon_client *monc)
+{
+	monc->hunt_mult /= 2; /* reduce by 50% */
+	if (monc->hunt_mult < 1)
+		monc->hunt_mult = 1;
+	dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
+}
+
 /*
  * Reschedule delayed work timer.
  */
@@ -955,6 +963,7 @@
 		if (!monc->hunting) {
 			ceph_con_keepalive(&monc->con);
 			__validate_auth(monc);
+			un_backoff(monc);
 		}
 
 		if (is_auth) {
@@ -1114,9 +1123,8 @@
 		dout("%s found mon%d\n", __func__, monc->cur_mon);
 		monc->hunting = false;
 		monc->had_a_connection = true;
-		monc->hunt_mult /= 2; /* reduce by 50% */
-		if (monc->hunt_mult < 1)
-			monc->hunt_mult = 1;
+		un_backoff(monc);
+		__schedule_delayed(monc);
 	}
 }
 
diff --git a/net/compat.c b/net/compat.c
index a96fd2f..73671e6 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -372,7 +372,8 @@
 	    optname == SO_ATTACH_REUSEPORT_CBPF)
 		return do_set_attach_filter(sock, level, optname,
 					    optval, optlen);
-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+	if (!COMPAT_USE_64BIT_TIME &&
+	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
 		return do_set_sock_timeout(sock, level, optname, optval, optlen);
 
 	return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -437,7 +438,8 @@
 static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
 				char __user *optval, int __user *optlen)
 {
-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+	if (!COMPAT_USE_64BIT_TIME &&
+	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
 		return do_get_sock_timeout(sock, level, optname, optval, optlen);
 	return sock_getsockopt(sock, level, optname, optval, optlen);
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 802b3fa..c6a8932 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2873,7 +2873,7 @@
 }
 EXPORT_SYMBOL(passthru_features_check);
 
-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+static netdev_features_t dflt_features_check(struct sk_buff *skb,
 					     struct net_device *dev,
 					     netdev_features_t features)
 {
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c0548d2..e3e6a3e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -57,8 +57,8 @@
 		return -EINVAL;
 
 	list_for_each_entry(ha, &list->list, list) {
-		if (!memcmp(ha->addr, addr, addr_len) &&
-		    ha->type == addr_type) {
+		if (ha->type == addr_type &&
+		    !memcmp(ha->addr, addr, addr_len)) {
 			if (global) {
 				/* check if addr is already used as global */
 				if (ha->global_use)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cb9a16b..340a3db 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@
 static void neigh_timer_handler(unsigned long arg);
 static void __neigh_notify(struct neighbour *n, int type, int flags);
 static void neigh_update_notify(struct neighbour *neigh);
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+				    struct net_device *dev);
 
 static unsigned int neigh_probe_enable;
 #ifdef CONFIG_PROC_FS
@@ -255,8 +256,7 @@
 {
 	write_lock_bh(&tbl->lock);
 	neigh_flush_dev(tbl, dev);
-	pneigh_ifdown(tbl, dev);
-	write_unlock_bh(&tbl->lock);
+	pneigh_ifdown_and_unlock(tbl, dev);
 
 	del_timer_sync(&tbl->proxy_timer);
 	pneigh_queue_purge(&tbl->proxy_queue);
@@ -646,9 +646,10 @@
 	return -ENOENT;
 }
 
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+				    struct net_device *dev)
 {
-	struct pneigh_entry *n, **np;
+	struct pneigh_entry *n, **np, *freelist = NULL;
 	u32 h;
 
 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -656,16 +657,23 @@
 		while ((n = *np) != NULL) {
 			if (!dev || n->dev == dev) {
 				*np = n->next;
-				if (tbl->pdestructor)
-					tbl->pdestructor(n);
-				if (n->dev)
-					dev_put(n->dev);
-				kfree(n);
+				n->next = freelist;
+				freelist = n;
 				continue;
 			}
 			np = &n->next;
 		}
 	}
+	write_unlock_bh(&tbl->lock);
+	while ((n = freelist)) {
+		freelist = n->next;
+		n->next = NULL;
+		if (tbl->pdestructor)
+			tbl->pdestructor(n);
+		if (n->dev)
+			dev_put(n->dev);
+		kfree(n);
+	}
 	return -ENOENT;
 }
 
@@ -2299,12 +2307,16 @@
 
 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
 	if (!err) {
-		if (tb[NDA_IFINDEX])
+		if (tb[NDA_IFINDEX]) {
+			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+				return -EINVAL;
 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-
-		if (tb[NDA_MASTER])
+		}
+		if (tb[NDA_MASTER]) {
+			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
+				return -EINVAL;
 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-
+		}
 		if (filter_idx || filter_master_idx)
 			flags |= NLM_F_DUMP_FILTERED;
 	}
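The pneigh_ifdown_and_unlock() change above moves the per-entry destructor, dev_put() and kfree() calls out from under tbl->lock by collecting victims on a private list first. Below is a userspace sketch of the same unlink-under-lock, free-after-unlock pattern, using pthreads instead of the kernel's write_lock_bh() (build with -pthread); all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int key;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

static void table_remove_key(int key)
{
	struct entry **np, *n, *freelist = NULL;

	pthread_mutex_lock(&table_lock);
	np = &table;
	while ((n = *np) != NULL) {
		if (n->key == key) {
			*np = n->next;		/* unlink under the lock */
			n->next = freelist;
			freelist = n;
			continue;
		}
		np = &n->next;
	}
	pthread_mutex_unlock(&table_lock);

	while ((n = freelist)) {		/* "destructors" run unlocked */
		freelist = n->next;
		printf("freeing entry with key %d\n", n->key);
		free(n);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->key = i % 2;
		e->next = table;
		table = e;
	}
	table_remove_key(1);
	return 0;
}

Running the destructors outside the lock avoids recursion or ordering problems if a destructor itself needs the table lock or may sleep.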
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 457f882..9b2d611 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -666,7 +666,7 @@
 	int err;
 
 	rtnl_lock();
-	if (np->dev_name) {
+	if (np->dev_name[0]) {
 		struct net *net = current->nsproxy->net_ns;
 		ndev = __dev_get_by_name(net, np->dev_name);
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e2136eb..89f0fbc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -909,6 +909,7 @@
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->cloned = 1;
 	n->nohdr = 0;
+	n->peeked = 0;
 	n->destructor = NULL;
 	C(tail);
 	C(end);
@@ -4480,13 +4481,18 @@
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
+	int mac_len;
+
 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
 		kfree_skb(skb);
 		return NULL;
 	}
 
-	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
-		2 * ETH_ALEN);
+	mac_len = skb->data - skb_mac_header(skb);
+	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
+		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+			mac_len - VLAN_HLEN - ETH_TLEN);
+	}
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 1d88335..0e82197 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1465,7 +1465,7 @@
 
 static void __sk_free(struct sock *sk)
 {
-	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
+	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
 		sock_diag_broadcast_destroy(sk);
 	else
 		sk_destruct(sk);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 7753681..86a2ed0 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@
 						  DCCPF_SEQ_WMAX));
 }
 
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		sock_hold(sk);
+		__tasklet_schedule(t);
+	}
+}
+
 static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
@@ -166,7 +176,7 @@
 
 	/* if we were blocked before, we may now send cwnd=1 packet */
 	if (sender_was_blocked)
-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+		dccp_tasklet_schedule(sk);
 	/* restart backed-off timer */
 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
@@ -706,7 +716,7 @@
 done:
 	/* check if incoming Acks allow pending packets to be sent */
 	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+		dccp_tasklet_schedule(sk);
 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c7799cd..6697b18 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -620,6 +620,7 @@
 	ireq = inet_rsk(req);
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+	ireq->ir_mark = inet_request_mark(sk, skb);
 	ireq->ireq_family = AF_INET;
 	ireq->ir_iif = sk->sk_bound_dev_if;
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 28e8252..6cbcf39 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -349,6 +349,7 @@
 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 	ireq->ireq_family = AF_INET6;
+	ireq->ir_mark = inet_request_mark(sk, skb);
 
 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3a2c340..2a952cb 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@
 	else
 		dccp_write_xmit(sk);
 	bh_unlock_sock(sk);
+	sock_put(sk);
 }
 
 static void dccp_write_xmit_timer(unsigned long data)
 {
 	dccp_write_xmitlet(data);
-	sock_put((struct sock *)data);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
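The ccid2.c and timer.c hunks above pair a reference grab with a successful tasklet schedule: dccp_tasklet_schedule() takes the socket reference only when it wins the test_and_set on TASKLET_STATE_SCHED, and the tasklet body, rather than the timer wrapper, drops it. A userspace sketch of that rule using C11 atomics; the flag and counter below are stand-ins for the tasklet state bit and the socket refcount, nothing here is DCCP code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag scheduled = ATOMIC_FLAG_INIT;
static atomic_int refcnt = 1;

static void schedule_work(void)
{
	/* analogous to test_and_set_bit(TASKLET_STATE_SCHED, &t->state) */
	if (!atomic_flag_test_and_set(&scheduled)) {
		atomic_fetch_add(&refcnt, 1);	/* like sock_hold(sk) */
		printf("queued, refcnt=%d\n", atomic_load(&refcnt));
	} else {
		printf("already queued, no extra reference taken\n");
	}
}

static void work_handler(void)
{
	atomic_flag_clear(&scheduled);
	/* ... the dccp_write_xmit() equivalent would run here ... */
	atomic_fetch_sub(&refcnt, 1);		/* like sock_put(sk) in the handler */
	printf("ran, refcnt=%d\n", atomic_load(&refcnt));
}

int main(void)
{
	schedule_work();
	schedule_work();	/* a second call must not take a second reference */
	work_handler();
	return 0;
}

Calling schedule_work() twice before the handler runs adds only one reference, matching the single handler invocation that will drop it.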
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index e1d4d89..f025276 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
 #include <linux/kernel.h>
 #include <linux/keyctl.h>
 #include <linux/err.h>
@@ -91,9 +92,9 @@
 
 			next_opt = memchr(opt, '#', end - opt) ?: end;
 			opt_len = next_opt - opt;
-			if (!opt_len) {
-				printk(KERN_WARNING
-				       "Empty option to dns_resolver key\n");
+			if (opt_len <= 0 || opt_len > 128) {
+				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+						    opt_len);
 				return -EINVAL;
 			}
 
@@ -127,10 +128,8 @@
 			}
 
 		bad_option_value:
-			printk(KERN_WARNING
-			       "Option '%*.*s' to dns_resolver key:"
-			       " bad/missing value\n",
-			       opt_nlen, opt_nlen, opt);
+			pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
+					    opt_nlen, opt_nlen, opt);
 			return -EINVAL;
 		} while (opt = next_opt + 1, opt < end);
 	}
diff --git a/net/ipc_router/Kconfig b/net/ipc_router/Kconfig
index 30cd45a..9121667 100644
--- a/net/ipc_router/Kconfig
+++ b/net/ipc_router/Kconfig
@@ -23,3 +23,13 @@
 	  once configured with the security rules will ensure that the
 	  sender of the message to a service belongs to the relevant
 	  Linux group as configured by the security script.
+
+config IPC_ROUTER_NODE_ID
+	depends on IPC_ROUTER
+	int "IPC router local NODE ID"
+	default 1
+	help
+	  This option allows the IPC Router NODE ID to be configured
+	  dynamically. The NODE ID defined here is used as the local NODE ID
+	  by the IPC Router core and is published to the other NODEs present
+	  in the network.
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index 3403108..1a6b539 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -137,6 +137,7 @@
 	struct msm_ipc_router_xprt *xprt;
 	u32 remote_node_id;
 	u32 initialized;
+	u32 hello_sent;
 	struct list_head pkt_list;
 	struct wakeup_source ws;
 	struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
@@ -2494,12 +2495,36 @@
 	}
 }
 
+static int send_hello_msg(struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int rc = 0;
+	union rr_control_msg ctl;
+
+	if (!xprt_info->hello_sent) {
+		xprt_info->hello_sent = 1;
+		/* Send a HELLO message */
+		memset(&ctl, 0, sizeof(ctl));
+		ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
+		ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
+		ctl.hello.versions = (uint32_t)IPC_ROUTER_VER_BITMASK;
+		ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
+		rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
+					     IPC_ROUTER_DUMMY_DEST_NODE);
+		if (rc < 0) {
+			xprt_info->hello_sent = 0;
+			IPC_RTR_ERR("%s: Error sending HELLO message\n",
+				    __func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
 static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
 			     union rr_control_msg *msg,
 			     struct rr_header_v1 *hdr)
 {
 	int i, rc = 0;
-	union rr_control_msg ctl;
 	struct msm_ipc_routing_table_entry *rt_entry;
 
 	if (!hdr)
@@ -2514,19 +2539,10 @@
 	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
 
 	do_version_negotiation(xprt_info, msg);
-	/* Send a reply HELLO message */
-	memset(&ctl, 0, sizeof(ctl));
-	ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
-	ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
-	ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
-	ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
-	rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
-				     IPC_ROUTER_DUMMY_DEST_NODE);
-	if (rc < 0) {
-		IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
-			    __func__);
+	rc = send_hello_msg(xprt_info);
+	if (rc < 0)
 		return rc;
-	}
+
 	xprt_info->initialized = 1;
 
 	/* Send list of servers from the local node and from nodes
@@ -4068,6 +4084,7 @@
 
 	xprt_info->xprt = xprt;
 	xprt_info->initialized = 0;
+	xprt_info->hello_sent = 0;
 	xprt_info->remote_node_id = -1;
 	INIT_LIST_HEAD(&xprt_info->pkt_list);
 	mutex_init(&xprt_info->rx_lock_lhb2);
@@ -4109,6 +4126,7 @@
 	up_write(&routing_table_lock_lha3);
 
 	xprt->priv = xprt_info;
+	send_hello_msg(xprt_info);
 
 	return 0;
 }
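The ipc_router change above makes the HELLO transmission idempotent per transport: send_hello_msg() is now called both at xprt registration and from process_hello_msg(), and the hello_sent flag, cleared again on failure, guarantees a single HELLO per link. A userspace sketch of that guard, with try_send() standing in for ipc_router_send_ctl_msg() and link_sketch for the xprt_info state.

#include <stdbool.h>
#include <stdio.h>

struct link_sketch {
	bool hello_sent;
};

static int try_send(bool link_ok)
{
	return link_ok ? 0 : -1;
}

static int send_hello(struct link_sketch *l, bool link_ok)
{
	int rc = 0;

	if (!l->hello_sent) {
		l->hello_sent = true;
		rc = try_send(link_ok);
		if (rc < 0)
			l->hello_sent = false;	/* allow a later retry */
	}
	return rc;
}

int main(void)
{
	struct link_sketch l = { .hello_sent = false };

	printf("%d\n", send_hello(&l, false));	/* fails, flag rolled back */
	printf("%d\n", send_hello(&l, true));	/* retried, succeeds */
	printf("%d\n", send_hello(&l, true));	/* no duplicate HELLO */
	return 0;
}

Clearing the flag on failure is what lets the later call retry instead of leaving the link without a handshake.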
diff --git a/net/ipc_router/ipc_router_private.h b/net/ipc_router/ipc_router_private.h
index 3ec9818..6e0c4be 100644
--- a/net/ipc_router/ipc_router_private.h
+++ b/net/ipc_router/ipc_router_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,7 +37,7 @@
 
 #define IPC_ROUTER_ADDRESS			0x0000FFFF
 
-#define IPC_ROUTER_NID_LOCAL			1
+#define IPC_ROUTER_NID_LOCAL	CONFIG_IPC_ROUTER_NODE_ID
 #define MAX_IPC_PKT_SIZE 66000
 
 #define IPC_ROUTER_LOW_RX_QUOTA		5
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ddcd56c..a6b34ac 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -182,6 +182,7 @@
 		tw->tw_dport	    = inet->inet_dport;
 		tw->tw_family	    = sk->sk_family;
 		tw->tw_reuse	    = sk->sk_reuse;
+		tw->tw_reuseport    = sk->sk_reuseport;
 		tw->tw_hash	    = sk->sk_hash;
 		tw->tw_ipv6only	    = 0;
 		tw->tw_transparent  = inet->transparent;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e60f9fa..d0bd98f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1098,7 +1098,8 @@
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG) &&
+		    skb_tailroom(skb) >= copy) {
 			unsigned int off;
 
 			off = skb->len;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index b120b9b..cbff0d6 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -396,7 +396,6 @@
 	memcpy(dev->dev_addr, &iph->saddr, 4);
 	memcpy(dev->broadcast, &iph->daddr, 4);
 
-	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
 	dev->mtu		= ETH_DATA_LEN;
 	dev->flags		= IFF_NOARP;
 	dev->addr_len		= 4;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 93bfadf..8fa153c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -775,8 +775,10 @@
 	ipc.addr = faddr = daddr;
 
 	if (ipc.opt && ipc.opt->opt.srr) {
-		if (!daddr)
-			return -EINVAL;
+		if (!daddr) {
+			err = -EINVAL;
+			goto out_free;
+		}
 		faddr = ipc.opt->opt.faddr;
 	}
 	tos = get_rttos(&ipc, inet);
@@ -842,6 +844,7 @@
 
 out:
 	ip_rt_put(rt);
+out_free:
 	if (free)
 		kfree(ipc.opt);
 	if (!err) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e83fdf6..583967b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -621,6 +621,7 @@
 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
 {
 	rt->rt_pmtu = fnhe->fnhe_pmtu;
+	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
 	rt->dst.expires = fnhe->fnhe_expires;
 
 	if (fnhe->fnhe_gw) {
@@ -631,7 +632,7 @@
 }
 
 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
-				  u32 pmtu, unsigned long expires)
+				  u32 pmtu, bool lock, unsigned long expires)
 {
 	struct fnhe_hash_bucket *hash;
 	struct fib_nh_exception *fnhe;
@@ -668,8 +669,10 @@
 			fnhe->fnhe_genid = genid;
 		if (gw)
 			fnhe->fnhe_gw = gw;
-		if (pmtu)
+		if (pmtu) {
 			fnhe->fnhe_pmtu = pmtu;
+			fnhe->fnhe_mtu_locked = lock;
+		}
 		fnhe->fnhe_expires = max(1UL, expires);
 		/* Update all cached dsts too */
 		rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -693,6 +696,7 @@
 		fnhe->fnhe_daddr = daddr;
 		fnhe->fnhe_gw = gw;
 		fnhe->fnhe_pmtu = pmtu;
+		fnhe->fnhe_mtu_locked = lock;
 		fnhe->fnhe_expires = expires;
 
 		/* Exception created; mark the cached routes for the nexthop
@@ -774,7 +778,8 @@
 				struct fib_nh *nh = &FIB_RES_NH(res);
 
 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
-						0, jiffies + ip_rt_gc_timeout);
+						0, false,
+						jiffies + ip_rt_gc_timeout);
 			}
 			if (kill_route)
 				rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -987,15 +992,18 @@
 {
 	struct dst_entry *dst = &rt->dst;
 	struct fib_result res;
+	bool lock = false;
 
-	if (dst_metric_locked(dst, RTAX_MTU))
+	if (ip_mtu_locked(dst))
 		return;
 
 	if (ipv4_mtu(dst) < mtu)
 		return;
 
-	if (mtu < ip_rt_min_pmtu)
+	if (mtu < ip_rt_min_pmtu) {
+		lock = true;
 		mtu = ip_rt_min_pmtu;
+	}
 
 	if (rt->rt_pmtu == mtu &&
 	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -1005,7 +1013,7 @@
 	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);
 
-		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+		update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
 				      jiffies + ip_rt_mtu_expires);
 	}
 	rcu_read_unlock();
@@ -1262,7 +1270,7 @@
 
 	mtu = READ_ONCE(dst->dev->mtu);
 
-	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+	if (unlikely(ip_mtu_locked(dst))) {
 		if (rt->rt_uses_gateway && mtu > 576)
 			mtu = 576;
 	}
@@ -1487,6 +1495,7 @@
 		rt->rt_is_input = 0;
 		rt->rt_iif = 0;
 		rt->rt_pmtu = 0;
+		rt->rt_mtu_locked = 0;
 		rt->rt_gateway = 0;
 		rt->rt_uses_gateway = 0;
 		rt->rt_table_id = 0;
@@ -2410,6 +2419,7 @@
 		rt->rt_is_input = ort->rt_is_input;
 		rt->rt_iif = ort->rt_iif;
 		rt->rt_pmtu = ort->rt_pmtu;
+		rt->rt_mtu_locked = ort->rt_mtu_locked;
 
 		rt->rt_genid = rt_genid_ipv4(net);
 		rt->rt_flags = ort->rt_flags;
@@ -2512,6 +2522,8 @@
 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
 	if (rt->rt_pmtu && expires)
 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+	if (rt->rt_mtu_locked && expires)
+		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
 	if (rtnetlink_put_metrics(skb, metrics) < 0)
 		goto nla_put_failure;
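The route.c changes above record whether a learned PMTU was clamped up to ip_rt_min_pmtu, so the clamped value is reported as a locked MTU rather than as a genuine path discovery. A minimal userspace sketch of the clamp-and-lock rule; the structure, the helper and the 552-byte minimum are illustrative stand-ins, not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

#define RT_MIN_PMTU 552		/* stand-in for the ip_rt_min_pmtu default */

struct rt_sketch {
	unsigned int pmtu;
	bool mtu_locked;
};

static void update_pmtu(struct rt_sketch *rt, unsigned int reported)
{
	bool lock = false;

	if (reported < RT_MIN_PMTU) {
		lock = true;		/* clamped: do not trust it as a real PMTU */
		reported = RT_MIN_PMTU;
	}
	rt->pmtu = reported;
	rt->mtu_locked = lock;
}

int main(void)
{
	struct rt_sketch rt = { .pmtu = 1500, .mtu_locked = false };

	update_pmtu(&rt, 300);	/* bogus ICMP "frag needed" with a tiny MTU */
	printf("pmtu=%u locked=%d\n", rt.pmtu, rt.mtu_locked);
	return 0;
}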
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index af0b324..fdfaaf0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1146,7 +1146,8 @@
 	lock_sock(sk);
 
 	flags = msg->msg_flags;
-	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
+	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+	    !tp->repair) {
 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
 		if (err == -EINPROGRESS && copied_syn > 0)
 			goto out;
@@ -2558,7 +2559,7 @@
 	case TCP_REPAIR_QUEUE:
 		if (!tp->repair)
 			err = -EPERM;
-		else if (val < TCP_QUEUES_NR)
+		else if ((unsigned int)val < TCP_QUEUES_NR)
 			tp->repair_queue = val;
 		else
 			err = -EINVAL;
@@ -2697,8 +2698,10 @@
 
 #ifdef CONFIG_TCP_MD5SIG
 	case TCP_MD5SIG:
-		/* Read the IP->Key mappings from userspace */
-		err = tp->af_specific->md5_parse(sk, optval, optlen);
+		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+			err = tp->af_specific->md5_parse(sk, optval, optlen);
+		else
+			err = -EINVAL;
 		break;
 #endif
 	case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8ec6053..91698595 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -773,7 +773,9 @@
 			}
 		}
 	}
-	bbr->idle_restart = 0;
+	/* Restart after idle ends only once we process a new S/ACK for data */
+	if (rs->delivered > 0)
+		bbr->idle_restart = 0;
 }
 
 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index c8e6d86..95ca887 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -6,7 +6,7 @@
  * The algorithm is described in:
  * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
  *  for High-Speed Networks"
- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
+ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
  *
  * Implemented from description in paper and ns-2 simulation.
  * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 16d3619..c2ad59d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3944,11 +3944,8 @@
 	int length = (th->doff << 2) - sizeof(*th);
 	const u8 *ptr = (const u8 *)(th + 1);
 
-	/* If the TCP option is too short, we can short cut */
-	if (length < TCPOLEN_MD5SIG)
-		return NULL;
-
-	while (length > 0) {
+	/* If not enough data remaining, we can short cut */
+	while (length >= TCPOLEN_MD5SIG) {
 		int opcode = *ptr++;
 		int opsize;
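The tcp_input.c hunk above tightens the MD5 option scan by making the remaining-length check part of the loop condition instead of a one-time test before a while (length > 0) loop. A userspace sketch of that TLV walk, with OPT_TARGET and OPT_TARGET_LEN standing in for TCPOPT_MD5SIG and TCPOLEN_MD5SIG.

#include <stdio.h>

#define OPT_EOL		0
#define OPT_NOP		1
#define OPT_TARGET	19	/* stand-in for TCPOPT_MD5SIG */
#define OPT_TARGET_LEN	18	/* stand-in for TCPOLEN_MD5SIG */

static const unsigned char *find_opt(const unsigned char *p, int length)
{
	while (length >= OPT_TARGET_LEN) {
		int opcode = *p++;
		int opsize;

		if (opcode == OPT_EOL)
			return NULL;
		if (opcode == OPT_NOP) {
			length--;
			continue;
		}
		opsize = *p++;
		if (opsize < 2 || opsize > length)
			return NULL;
		if (opcode == OPT_TARGET && opsize == OPT_TARGET_LEN)
			return p;	/* points at the option value */
		p += opsize - 2;
		length -= opsize;
	}
	return NULL;
}

int main(void)
{
	/* two NOPs, then kind/len plus 16 zero "digest" bytes */
	unsigned char opts[20] = { OPT_NOP, OPT_NOP, OPT_TARGET, OPT_TARGET_LEN };

	printf("found=%d\n", find_opt(opts, (int)sizeof(opts)) != NULL);
	return 0;
}

Once fewer than 18 bytes remain, no valid MD5 option can still start, so the loop stops without reading past the option area.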
 
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index e45e2c4..37a3cb9 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -338,7 +338,7 @@
 		 */
 		cwnd_by_slope = (u32)
 			div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
-				  (u64)(80000 * tp->mss_cache));
+				  80000ULL * tp->mss_cache);
 		max_win = cwnd_by_slope + nv_pad;
 
 		/* If cwnd > max_win, decrease cwnd
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 16a473a..70c7212 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2691,8 +2691,10 @@
 		return -EBUSY;
 
 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
-		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-			BUG();
+		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
 			return -ENOMEM;
 	}
@@ -3236,6 +3238,7 @@
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->snd_wnd = 0;
 	tcp_init_wl(tp, 0);
+	tcp_write_queue_purge(sk);
 	tp->snd_una = tp->write_seq;
 	tp->snd_sml = tp->write_seq;
 	tp->snd_up = tp->write_seq;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5af27b9..885cc39 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -987,8 +987,10 @@
 	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
 
 	if (ipc.opt && ipc.opt->opt.srr) {
-		if (!daddr)
-			return -EINVAL;
+		if (!daddr) {
+			err = -EINVAL;
+			goto out_free;
+		}
 		faddr = ipc.opt->opt.faddr;
 		connected = 0;
 	}
@@ -1096,6 +1098,7 @@
 
 out:
 	ip_rt_put(rt);
+out_free:
 	if (free)
 		kfree(ipc.opt);
 	if (!err)
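The ping.c and udp.c hunks above convert early return -EINVAL statements into jumps to a new out_free label, because by that point the cmsg-derived IP options may already have been allocated. A userspace sketch of the single-exit cleanup pattern; sendmsg_sketch() and its arguments are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int sendmsg_sketch(int use_srr, int have_daddr)
{
	char *opt;
	int err = 0;

	opt = malloc(64);		/* like the copied cmsg options */
	if (!opt)
		return -ENOMEM;

	if (use_srr && !have_daddr) {
		err = -EINVAL;		/* was: return -EINVAL, leaking opt */
		goto out_free;
	}

	/* ... route lookup and transmit would happen here ... */

out_free:
	free(opt);
	return err;
}

int main(void)
{
	printf("%d\n", sendmsg_sketch(1, 0));	/* -EINVAL, no leak */
	printf("%d\n", sendmsg_sketch(0, 1));	/* 0 */
	return 0;
}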
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7f9a8df..e62d76c9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -101,6 +101,7 @@
 	xdst->u.rt.rt_gateway = rt->rt_gateway;
 	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
 	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
 	xdst->u.rt.rt_table_id = rt->rt_table_id;
 	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index de0188e..0b5a75b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1553,7 +1553,8 @@
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG) &&
+		    skb_tailroom(skb) >= copy) {
 			unsigned int off;
 
 			off = skb->len;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f338848..5603410 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1976,14 +1976,14 @@
 {
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-	struct ip6_tnl *nt, *t;
 	struct ip_tunnel_encap ipencap;
+	struct ip6_tnl *nt, *t;
+	int err;
 
 	nt = netdev_priv(dev);
 
 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
-		int err = ip6_tnl_encap_setup(nt, &ipencap);
-
+		err = ip6_tnl_encap_setup(nt, &ipencap);
 		if (err < 0)
 			return err;
 	}
@@ -1999,7 +1999,11 @@
 			return -EEXIST;
 	}
 
-	return ip6_tnl_create2(dev);
+	err = ip6_tnl_create2(dev);
+	if (!err && tb[IFLA_MTU])
+		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
+	return err;
 }
 
 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 96be019..0fbc5ba 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2776,6 +2776,7 @@
 
 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
+	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
 	[RTA_OIF]               = { .type = NLA_U32 },
 	[RTA_IIF]		= { .type = NLA_U32 },
 	[RTA_PRIORITY]          = { .type = NLA_U32 },
@@ -2786,6 +2787,7 @@
 	[RTA_ENCAP]		= { .type = NLA_NESTED },
 	[RTA_EXPIRES]		= { .type = NLA_U32 },
 	[RTA_UID]		= { .type = NLA_U32 },
+	[RTA_TABLE]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index dcb2921..ae0485d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1572,6 +1572,13 @@
 	if (err < 0)
 		return err;
 
+	if (tb[IFLA_MTU]) {
+		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+			dev->mtu = mtu;
+	}
+
 #ifdef CONFIG_IPV6_SIT_6RD
 	if (ipip6_netlink_6rd_parms(data, &ip6rd))
 		err = ipip6_tunnel_update_6rd(nt, &ip6rd);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 63e6d08..cc306de 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1424,6 +1424,7 @@
 	 */
 	if (csk->sk_user_data) {
 		write_unlock_bh(&csk->sk_callback_lock);
+		strp_stop(&psock->strp);
 		strp_done(&psock->strp);
 		kmem_cache_free(kcm_psockp, psock);
 		err = -EALREADY;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ce12384..ee03bc8 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -750,8 +750,6 @@
 
 	if ((session->ifname[0] &&
 	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
-	    (session->offset &&
-	     nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
 	    (session->cookie_len &&
 	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
 		     &session->cookie[0])) ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 163f1fa..9b214f3 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -590,6 +590,13 @@
 	lock_sock(sk);
 
 	error = -EINVAL;
+
+	if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+	    sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+		goto end;
+
 	if (sp->sa_protocol != PX_PROTO_OL2TP)
 		goto end;
 
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index f7caf0f..85aae8c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@
 		llc->laddr.lsap, llc->daddr.lsap);
 	if (!llc_send_disc(sk))
 		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
-	if (!sock_flag(sk, SOCK_ZAPPED))
+	if (!sock_flag(sk, SOCK_ZAPPED)) {
+		struct llc_sap *sap = llc->sap;
+
+		/* Hold this for release_sock(), so that llc_backlog_rcv()
+		 * could still use it.
+		 */
+		llc_sap_hold(sap);
 		llc_sap_remove_socket(llc->sap, sk);
-	release_sock(sk);
+		release_sock(sk);
+		llc_sap_put(sap);
+	} else {
+		release_sock(sk);
+	}
 	if (llc->dev)
 		dev_put(llc->dev);
 	sock_put(sk);
@@ -916,6 +926,9 @@
 	if (size > llc->dev->mtu)
 		size = llc->dev->mtu;
 	copied = size - hdrlen;
+	rc = -EINVAL;
+	if (copied < 0)
+		goto release;
 	release_sock(sk);
 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
 	lock_sock(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd..4b60f68 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -389,7 +389,7 @@
 	llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
 	if (likely(!rc)) {
-		llc_conn_send_pdu(sk, skb);
+		rc = llc_conn_send_pdu(sk, skb);
 		llc_conn_ac_inc_vs_by_1(sk, skb);
 	}
 	return rc;
@@ -916,7 +916,7 @@
 	llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
 	rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
 	if (likely(!rc)) {
-		llc_conn_send_pdu(sk, skb);
+		rc = llc_conn_send_pdu(sk, skb);
 		llc_conn_ac_inc_vs_by_1(sk, skb);
 	}
 	return rc;
@@ -935,14 +935,17 @@
 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct llc_sock *llc = llc_sk(sk);
+	int ret;
 
 	if (llc->ack_must_be_send) {
-		llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
+		ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
 		llc->ack_must_be_send = 0 ;
 		llc->ack_pf = 0;
-	} else
-		llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
-	return 0;
+	} else {
+		ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
+	}
+
+	return ret;
 }
 
 /**
@@ -1096,14 +1099,7 @@
 
 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
 {
-	struct llc_sock *llc = llc_sk(sk);
-
-	del_timer(&llc->pf_cycle_timer.timer);
-	del_timer(&llc->ack_timer.timer);
-	del_timer(&llc->rej_sent_timer.timer);
-	del_timer(&llc->busy_state_timer.timer);
-	llc->ack_must_be_send = 0;
-	llc->ack_pf = 0;
+	llc_sk_stop_all_timers(sk, false);
 	return 0;
 }
 
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1b..79c346f 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -30,7 +30,7 @@
 #endif
 
 static int llc_find_offset(int state, int ev_type);
-static void llc_conn_send_pdus(struct sock *sk);
+static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
 static int llc_exec_conn_trans_actions(struct sock *sk,
 				       struct llc_conn_state_trans *trans,
@@ -193,11 +193,11 @@
 	return rc;
 }
 
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
 {
 	/* queue PDU to send to MAC layer */
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	llc_conn_send_pdus(sk);
+	return llc_conn_send_pdus(sk, skb);
 }
 
 /**
@@ -255,7 +255,7 @@
 	if (howmany_resend > 0)
 		llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
 	/* any PDUs to re-send are queued up; start sending to MAC */
-	llc_conn_send_pdus(sk);
+	llc_conn_send_pdus(sk, NULL);
 out:;
 }
 
@@ -296,7 +296,7 @@
 	if (howmany_resend > 0)
 		llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
 	/* any PDUs to re-send are queued up; start sending to MAC */
-	llc_conn_send_pdus(sk);
+	llc_conn_send_pdus(sk, NULL);
 out:;
 }
 
@@ -340,12 +340,16 @@
 /**
  *	llc_conn_send_pdus - Sends queued PDUs
  *	@sk: active connection
+ *	@hold_skb: the skb held by the caller, or NULL if the caller does not care
  *
- *	Sends queued pdus to MAC layer for transmission.
+ *	Sends queued pdus to MAC layer for transmission. When @hold_skb is
+ *	NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
+ *	successfully, or 1 for failure.
  */
-static void llc_conn_send_pdus(struct sock *sk)
+static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
 {
 	struct sk_buff *skb;
+	int ret = 0;
 
 	while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
 		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -357,10 +361,20 @@
 			skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
 			if (!skb2)
 				break;
-			skb = skb2;
+			dev_queue_xmit(skb2);
+		} else {
+			bool is_target = skb == hold_skb;
+			int rc;
+
+			if (is_target)
+				skb_get(skb);
+			rc = dev_queue_xmit(skb);
+			if (is_target)
+				ret = rc;
 		}
-		dev_queue_xmit(skb);
 	}
+
+	return ret;
 }
 
 /**
@@ -951,6 +965,26 @@
 	return sk;
 }
 
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+	struct llc_sock *llc = llc_sk(sk);
+
+	if (sync) {
+		del_timer_sync(&llc->pf_cycle_timer.timer);
+		del_timer_sync(&llc->ack_timer.timer);
+		del_timer_sync(&llc->rej_sent_timer.timer);
+		del_timer_sync(&llc->busy_state_timer.timer);
+	} else {
+		del_timer(&llc->pf_cycle_timer.timer);
+		del_timer(&llc->ack_timer.timer);
+		del_timer(&llc->rej_sent_timer.timer);
+		del_timer(&llc->busy_state_timer.timer);
+	}
+
+	llc->ack_must_be_send = 0;
+	llc->ack_pf = 0;
+}
+
 /**
  *	llc_sk_free - Frees a LLC socket
  *	@sk - socket to free
@@ -963,7 +997,7 @@
 
 	llc->state = LLC_CONN_OUT_OF_SVC;
 	/* Stop all (possibly) running timers */
-	llc_conn_ac_stop_all_timers(sk, NULL);
+	llc_sk_stop_all_timers(sk, true);
 #ifdef DEBUG_LLC_CONN_ALLOC
 	printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
 		skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 404284a..474655a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3907,7 +3907,7 @@
 	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
 					      IEEE80211_FCTL_TODS)) !=
 	    fast_rx->expected_ds_bits)
-		goto drop;
+		return false;
 
 	/* assign the key to drop unencrypted frames (later)
 	 * and strip the IV/MIC if necessary
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 97f4c9d..9249712 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2008, Intel Corporation
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -27,7 +28,7 @@
 				 u32 sta_flags, u8 *bssid,
 				 struct ieee80211_csa_ie *csa_ie)
 {
-	enum nl80211_band new_band;
+	enum nl80211_band new_band = current_band;
 	int new_freq;
 	u8 new_chan_no;
 	struct ieee80211_channel *new_chan;
@@ -53,15 +54,13 @@
 				elems->ext_chansw_ie->new_operating_class,
 				&new_band)) {
 			sdata_info(sdata,
-				   "cannot understand ECSA IE operating class %d, disconnecting\n",
+				   "cannot understand ECSA IE operating class, %d, ignoring\n",
 				   elems->ext_chansw_ie->new_operating_class);
-			return -EINVAL;
 		}
 		new_chan_no = elems->ext_chansw_ie->new_ch_num;
 		csa_ie->count = elems->ext_chansw_ie->count;
 		csa_ie->mode = elems->ext_chansw_ie->mode;
 	} else if (elems->ch_switch_ie) {
-		new_band = current_band;
 		new_chan_no = elems->ch_switch_ie->new_ch_num;
 		csa_ie->count = elems->ch_switch_ie->count;
 		csa_ie->mode = elems->ch_switch_ie->mode;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 1ecf3d0..892c392 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -313,7 +313,7 @@
 
 	if (ieee80211_hw_check(hw, USES_RSS)) {
 		sta->pcpu_rx_stats =
-			alloc_percpu(struct ieee80211_sta_rx_stats);
+			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
 		if (!sta->pcpu_rx_stats)
 			goto free;
 	}
@@ -433,6 +433,7 @@
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 free:
+	free_percpu(sta->pcpu_rx_stats);
 #ifdef CONFIG_MAC80211_MESH
 	kfree(sta->mesh);
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 74d1195..c5f2350 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2393,11 +2393,7 @@
 			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
 				sizeof(cfg.mcast_ifn));
 			cfg.syncid = dm->syncid;
-			rtnl_lock();
-			mutex_lock(&ipvs->sync_mutex);
 			ret = start_sync_thread(ipvs, &cfg, dm->state);
-			mutex_unlock(&ipvs->sync_mutex);
-			rtnl_unlock();
 		} else {
 			mutex_lock(&ipvs->sync_mutex);
 			ret = stop_sync_thread(ipvs, dm->state);
@@ -3495,12 +3491,8 @@
 	if (ipvs->mixed_address_family_dests > 0)
 		return -EINVAL;
 
-	rtnl_lock();
-	mutex_lock(&ipvs->sync_mutex);
 	ret = start_sync_thread(ipvs, &c,
 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-	mutex_unlock(&ipvs->sync_mutex);
-	rtnl_unlock();
 	return ret;
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9350530..5fbf4b2 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -48,6 +48,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 
 #include <asm/unaligned.h>		/* Used for ntoh_seq and hton_seq */
 
@@ -1359,15 +1360,9 @@
 /*
  *      Specifiy default interface for outgoing multicasts
  */
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
 {
-	struct net_device *dev;
 	struct inet_sock *inet = inet_sk(sk);
-	struct net *net = sock_net(sk);
-
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
@@ -1395,19 +1390,14 @@
  *      in the in_addr structure passed in as a parameter.
  */
 static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
 {
-	struct net *net = sock_net(sk);
 	struct ip_mreqn mreq;
-	struct net_device *dev;
 	int ret;
 
 	memset(&mreq, 0, sizeof(mreq));
 	memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
 
@@ -1422,15 +1412,10 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
-			     char *ifname)
+			     struct net_device *dev)
 {
-	struct net *net = sock_net(sk);
-	struct net_device *dev;
 	int ret;
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
 
@@ -1442,24 +1427,18 @@
 }
 #endif
 
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
 {
-	struct net *net = sock_net(sock->sk);
-	struct net_device *dev;
 	__be32 addr;
 	struct sockaddr_in sin;
 
-	dev = __dev_get_by_name(net, ifname);
-	if (!dev)
-		return -ENODEV;
-
 	addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
 	if (!addr)
 		pr_err("You probably need to specify IP address on "
 		       "multicast interface.\n");
 
 	IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
-		  ifname, &addr);
+		  dev->name, &addr);
 
 	/* Now bind the socket with the address of multicast interface */
 	sin.sin_family	     = AF_INET;
@@ -1492,7 +1471,8 @@
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+			  struct net_device *dev, struct socket **sock_ret)
 {
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
@@ -1504,9 +1484,10 @@
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
-		return ERR_PTR(result);
+		goto error;
 	}
-	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+	*sock_ret = sock;
+	result = set_mcast_if(sock->sk, dev);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
 		goto error;
@@ -1521,7 +1502,7 @@
 		set_sock_size(sock->sk, 1, result);
 
 	if (AF_INET == ipvs->mcfg.mcast_af)
-		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+		result = bind_mcastif_addr(sock, dev);
 	else
 		result = 0;
 	if (result < 0) {
@@ -1537,19 +1518,18 @@
 		goto error;
 	}
 
-	return sock;
+	return 0;
 
 error:
-	sock_release(sock);
-	return ERR_PTR(result);
+	return result;
 }
 
 
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
-					int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+			     struct net_device *dev, struct socket **sock_ret)
 {
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
@@ -1561,8 +1541,9 @@
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
-		return ERR_PTR(result);
+		goto error;
 	}
+	*sock_ret = sock;
 	/* it is equivalent to the REUSEADDR option in user-space */
 	sock->sk->sk_reuse = SK_CAN_REUSE;
 	result = sysctl_sync_sock_size(ipvs);
@@ -1570,7 +1551,7 @@
 		set_sock_size(sock->sk, 0, result);
 
 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
-	sock->sk->sk_bound_dev_if = ifindex;
+	sock->sk->sk_bound_dev_if = dev->ifindex;
 	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
 	if (result < 0) {
 		pr_err("Error binding to the multicast addr\n");
@@ -1581,21 +1562,20 @@
 #ifdef CONFIG_IP_VS_IPV6
 	if (ipvs->bcfg.mcast_af == AF_INET6)
 		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
-					   ipvs->bcfg.mcast_ifn);
+					   dev);
 	else
 #endif
 		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
-					  ipvs->bcfg.mcast_ifn);
+					  dev);
 	if (result < 0) {
 		pr_err("Error joining to the multicast group\n");
 		goto error;
 	}
 
-	return sock;
+	return 0;
 
 error:
-	sock_release(sock);
-	return ERR_PTR(result);
+	return result;
 }
 
 
@@ -1780,13 +1760,12 @@
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
-	struct ip_vs_sync_thread_data *tinfo;
+	struct ip_vs_sync_thread_data *tinfo = NULL;
 	struct task_struct **array = NULL, *task;
-	struct socket *sock;
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
-	int id, count, hlen;
+	int id = 0, count, hlen;
 	int result = -ENOMEM;
 	u16 mtu, min_mtu;
 
@@ -1794,6 +1773,18 @@
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn_v0));
 
+	/* Do not hold one mutex and then block on another */
+	for (;;) {
+		rtnl_lock();
+		if (mutex_trylock(&ipvs->sync_mutex))
+			break;
+		rtnl_unlock();
+		mutex_lock(&ipvs->sync_mutex);
+		if (rtnl_trylock())
+			break;
+		mutex_unlock(&ipvs->sync_mutex);
+	}
+
 	if (!ipvs->sync_state) {
 		count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
 		ipvs->threads_mask = count - 1;
@@ -1812,7 +1803,8 @@
 	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
 	if (!dev) {
 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
-		return -ENODEV;
+		result = -ENODEV;
+		goto out_early;
 	}
 	hlen = (AF_INET6 == c->mcast_af) ?
 	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1829,26 +1821,30 @@
 		c->sync_maxlen = mtu - hlen;
 
 	if (state == IP_VS_STATE_MASTER) {
+		result = -EEXIST;
 		if (ipvs->ms)
-			return -EEXIST;
+			goto out_early;
 
 		ipvs->mcfg = *c;
 		name = "ipvs-m:%d:%d";
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
+		result = -EEXIST;
 		if (ipvs->backup_threads)
-			return -EEXIST;
+			goto out_early;
 
 		ipvs->bcfg = *c;
 		name = "ipvs-b:%d:%d";
 		threadfn = sync_thread_backup;
 	} else {
-		return -EINVAL;
+		result = -EINVAL;
+		goto out_early;
 	}
 
 	if (state == IP_VS_STATE_MASTER) {
 		struct ipvs_master_sync_state *ms;
 
+		result = -ENOMEM;
 		ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
 		if (!ipvs->ms)
 			goto out;
@@ -1864,39 +1860,38 @@
 	} else {
 		array = kzalloc(count * sizeof(struct task_struct *),
 				GFP_KERNEL);
+		result = -ENOMEM;
 		if (!array)
 			goto out;
 	}
 
-	tinfo = NULL;
 	for (id = 0; id < count; id++) {
-		if (state == IP_VS_STATE_MASTER)
-			sock = make_send_sock(ipvs, id);
-		else
-			sock = make_receive_sock(ipvs, id, dev->ifindex);
-		if (IS_ERR(sock)) {
-			result = PTR_ERR(sock);
-			goto outtinfo;
-		}
+		result = -ENOMEM;
 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
 		if (!tinfo)
-			goto outsocket;
+			goto out;
 		tinfo->ipvs = ipvs;
-		tinfo->sock = sock;
+		tinfo->sock = NULL;
 		if (state == IP_VS_STATE_BACKUP) {
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
-				goto outtinfo;
+				goto out;
 		} else {
 			tinfo->buf = NULL;
 		}
 		tinfo->id = id;
+		if (state == IP_VS_STATE_MASTER)
+			result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+		else
+			result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+		if (result < 0)
+			goto out;
 
 		task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
 		if (IS_ERR(task)) {
 			result = PTR_ERR(task);
-			goto outtinfo;
+			goto out;
 		}
 		tinfo = NULL;
 		if (state == IP_VS_STATE_MASTER)
@@ -1913,20 +1908,20 @@
 	ipvs->sync_state |= state;
 	spin_unlock_bh(&ipvs->sync_buff_lock);
 
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
+
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
 	return 0;
 
-outsocket:
-	sock_release(sock);
-
-outtinfo:
-	if (tinfo) {
-		sock_release(tinfo->sock);
-		kfree(tinfo->buf);
-		kfree(tinfo);
-	}
+out:
+	/* We do not need RTNL lock anymore, release it here so that
+	 * sock_release below and in the kthreads can use rtnl_lock
+	 * to leave the mcast group.
+	 */
+	rtnl_unlock();
 	count = id;
 	while (count-- > 0) {
 		if (state == IP_VS_STATE_MASTER)
@@ -1934,13 +1929,23 @@
 		else
 			kthread_stop(array[count]);
 	}
-	kfree(array);
-
-out:
 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
 	}
+	mutex_unlock(&ipvs->sync_mutex);
+	if (tinfo) {
+		if (tinfo->sock)
+			sock_release(tinfo->sock);
+		kfree(tinfo->buf);
+		kfree(tinfo);
+	}
+	kfree(array);
+	return result;
+
+out_early:
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
 	return result;
 }
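The start_sync_thread() rework above has to hold both the RTNL lock and ipvs->sync_mutex without ever blocking on one while holding the other, since other paths take them in the opposite order. A userspace sketch (pthreads, build with -pthread) of the lock/trylock/back-off loop used at the top of the function; lock_a and lock_b stand in for rtnl_lock and sync_mutex.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* the "rtnl" side */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* the "sync_mutex" side */

static void lock_both(void)
{
	for (;;) {
		pthread_mutex_lock(&lock_a);
		if (pthread_mutex_trylock(&lock_b) == 0)
			break;
		pthread_mutex_unlock(&lock_a);

		pthread_mutex_lock(&lock_b);
		if (pthread_mutex_trylock(&lock_a) == 0)
			break;
		pthread_mutex_unlock(&lock_b);
	}
}

static void unlock_both(void)
{
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int main(void)
{
	lock_both();
	printf("both locks held without a fixed acquisition order\n");
	unlock_both();
	return 0;
}

Whichever lock is acquired first, the second is only tried, so a conflicting thread is never waited on while a lock is held; the cost is a retry loop rather than a blocking wait.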
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fa3ef25..762f31f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2200,41 +2200,46 @@
 	}
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-		if (nft_is_active_next(net, old_rule)) {
-			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-						   old_rule);
-			if (trans == NULL) {
-				err = -ENOMEM;
-				goto err2;
-			}
-			nft_deactivate_next(net, old_rule);
-			chain->use--;
-			list_add_tail_rcu(&rule->list, &old_rule->list);
-		} else {
+		if (!nft_is_active_next(net, old_rule)) {
 			err = -ENOENT;
 			goto err2;
 		}
-	} else if (nlh->nlmsg_flags & NLM_F_APPEND)
-		if (old_rule)
-			list_add_rcu(&rule->list, &old_rule->list);
-		else
-			list_add_tail_rcu(&rule->list, &chain->rules);
-	else {
-		if (old_rule)
-			list_add_tail_rcu(&rule->list, &old_rule->list);
-		else
-			list_add_rcu(&rule->list, &chain->rules);
-	}
+		trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+					   old_rule);
+		if (trans == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+		nft_deactivate_next(net, old_rule);
+		chain->use--;
 
-	if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-		err = -ENOMEM;
-		goto err3;
+		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+
+		list_add_tail_rcu(&rule->list, &old_rule->list);
+	} else {
+		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+			err = -ENOMEM;
+			goto err2;
+		}
+
+		if (nlh->nlmsg_flags & NLM_F_APPEND) {
+			if (old_rule)
+				list_add_rcu(&rule->list, &old_rule->list);
+			else
+				list_add_tail_rcu(&rule->list, &chain->rules);
+		 } else {
+			if (old_rule)
+				list_add_tail_rcu(&rule->list, &old_rule->list);
+			else
+				list_add_rcu(&rule->list, &chain->rules);
+		}
 	}
 	chain->use++;
 	return 0;
 
-err3:
-	list_del_rcu(&rule->list);
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
 err1:
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 4528cff..a123d0d 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1469,6 +1469,16 @@
 		iface = rcu_dereference(netlbl_unlhsh_def);
 	if (iface == NULL || !iface->valid)
 		goto unlabel_getattr_nolabel;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	/* When resolving a fallback label, check the sk_buff version as
+	 * it is possible (e.g. SCTP) to have family = PF_INET6 while
+	 * receiving ip_hdr(skb)->version = 4.
+	 */
+	if (family == PF_INET6 && ip_hdr(skb)->version == 4)
+		family = PF_INET;
+#endif /* IPv6 */
+
 	switch (family) {
 	case PF_INET: {
 		struct iphdr *hdr4;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1e97b8d..15e6e7b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1795,6 +1795,8 @@
 
 	if (msg->msg_namelen) {
 		err = -EINVAL;
+		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+			goto out;
 		if (addr->nl_family != AF_NETLINK)
 			goto out;
 		dst_portid = addr->nl_pid;
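The af_netlink.c hunk above validates msg_namelen before any field of the caller-supplied sockaddr_nl is read. A userspace sketch of the same check; nl_addr_sketch mirrors the role of struct sockaddr_nl but is not the real layout, and parse_dest() is illustrative.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

struct nl_addr_sketch {		/* stand-in for struct sockaddr_nl */
	unsigned short family;
	unsigned int pid;
	unsigned int groups;
};

static int parse_dest(const void *name, size_t namelen,
		      unsigned int *pid, unsigned int *groups)
{
	struct nl_addr_sketch addr;

	if (namelen < sizeof(addr))	/* the new msg_namelen check */
		return -EINVAL;
	memcpy(&addr, name, sizeof(addr));
	if (addr.family != AF_NETLINK)
		return -EINVAL;
	*pid = addr.pid;
	*groups = addr.groups;
	return 0;
}

int main(void)
{
	struct nl_addr_sketch a = { .family = AF_NETLINK, .pid = 42, .groups = 0 };
	unsigned int pid, groups;

	printf("%d\n", parse_dest(&a, sizeof(a), &pid, &groups));	/* 0 */
	printf("%d\n", parse_dest(&a, 2, &pid, &groups));		/* -EINVAL */
	return 0;
}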
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index c5959ce..3f26611 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -149,6 +149,10 @@
 
 	pr_debug("uri: %s, len: %zu\n", uri, uri_len);
 
+	/* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
+	if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
+		return NULL;
+
 	sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
 	if (sdreq == NULL)
 		return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 102c681..dbf74af 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -68,7 +68,8 @@
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
-	[NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
+	[NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
+			       .len = U8_MAX - 4 },
 	[NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
 };
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 4663939..f135814 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -906,6 +906,36 @@
 	return 0;
 }
 
+/* Trim the skb to the length specified by the IP/IPv6 header,
+ * removing any trailing lower-layer padding. This prepares the skb
+ * for higher-layer processing that assumes skb->len excludes padding
+ * (such as nf_ip_checksum). The caller needs to pull the skb to the
+ * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
+ */
+static int ovs_skb_network_trim(struct sk_buff *skb)
+{
+	unsigned int len;
+	int err;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		len = ntohs(ip_hdr(skb)->tot_len);
+		break;
+	case htons(ETH_P_IPV6):
+		len = sizeof(struct ipv6hdr)
+			+ ntohs(ipv6_hdr(skb)->payload_len);
+		break;
+	default:
+		len = skb->len;
+	}
+
+	err = pskb_trim_rcsum(skb, len);
+	if (err)
+		kfree_skb(skb);
+
+	return err;
+}
+
 /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
  * value if 'skb' is freed.
  */
@@ -920,6 +950,10 @@
 	nh_ofs = skb_network_offset(skb);
 	skb_pull_rcsum(skb, nh_ofs);
 
+	err = ovs_skb_network_trim(skb);
+	if (err)
+		return err;
+
 	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
 		err = handle_fragments(net, key, info->zone.id, skb);
 		if (err)
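
One concrete case the new helper handles: link layers may pad short frames (Ethernet pads the payload up to 46 bytes), so skb->len can exceed the length the IP header declares, and without the trim that padding would be fed into the checksum and conntrack processing mentioned in the comment. A userspace sketch of the length computation, with illustrative numbers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration of the length math in ovs_skb_network_trim(): the L3
 * header says how long the packet really is; anything beyond that,
 * such as Ethernet minimum-frame padding, gets trimmed off. */
int main(void)
{
	uint16_t tot_len_be = htons(28);	/* IPv4 tot_len on the wire     */
	unsigned int l3_bytes_received = 46;	/* padded to Ethernet's minimum */

	unsigned int keep = ntohs(tot_len_be);
	unsigned int trim = l3_bytes_received > keep ?
			    l3_bytes_received - keep : 0;

	printf("keep %u bytes, trim %u bytes of padding\n", keep, trim);
	return 0;	/* keep 28 bytes, trim 18 bytes of padding */
}
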
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 1668916..326945d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1296,13 +1296,10 @@
 
 	/* The nlattr stream should already have been validated */
 	nla_for_each_nested(nla, attr, rem) {
-		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
-			if (tbl[nla_type(nla)].next)
-				tbl = tbl[nla_type(nla)].next;
-			nlattr_set(nla, val, tbl);
-		} else {
+		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+			nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+		else
 			memset(nla_data(nla), val, nla_len(nla));
-		}
 
 		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
 			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
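
The rewritten branch leans on the GNU C conditional with an omitted middle operand: a ?: b evaluates to a when a is non-zero (or non-NULL) and to b otherwise, with a evaluated only once, so tbl[nla_type(nla)].next ? : tbl selects the nested table when one exists and falls back to the current table. A two-line illustration:

#include <stdio.h>

/* a ?: b  is GNU C for  a ? a : b  with a evaluated once; the kernel
 * uses it heavily, including in the hunk above. */
int main(void)
{
	const char *next = NULL;
	const char *tbl = "fallback table";

	printf("%s\n", next ?: tbl);	/* prints: fallback table */
	return 0;
}
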
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 267db0d..430a7c7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -333,11 +333,11 @@
 	skb_set_queue_mapping(skb, queue_index);
 }
 
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
  */
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
 {
 	struct packet_sock *po = pkt_sk(sk);
 
@@ -352,8 +352,13 @@
 	}
 }
 
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held.   If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+	__register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
  * the po->bind_lock and do a synchronize_net to make sure no
  * asynchronous packet processing paths still refer to the elements
  * of po->prot_hook.  If the sync parameter is false, it is the
@@ -363,6 +368,8 @@
 {
 	struct packet_sock *po = pkt_sk(sk);
 
+	lockdep_assert_held_once(&po->bind_lock);
+
 	po->running = 0;
 
 	if (po->fanout)
@@ -1685,7 +1692,7 @@
 		match->flags = flags;
 		INIT_LIST_HEAD(&match->list);
 		spin_lock_init(&match->lock);
-		atomic_set(&match->sk_ref, 0);
+		refcount_set(&match->sk_ref, 0);
 		fanout_init_data(match);
 		match->prot_hook.type = po->prot_hook.type;
 		match->prot_hook.dev = po->prot_hook.dev;
@@ -1702,19 +1709,19 @@
 	    match->prot_hook.type == po->prot_hook.type &&
 	    match->prot_hook.dev == po->prot_hook.dev) {
 		err = -ENOSPC;
-		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
+		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
 			po->rollover = rollover;
 			rollover = NULL;
-			atomic_inc(&match->sk_ref);
+			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
 			__fanout_link(sk, po);
 			err = 0;
 		}
 	}
 	spin_unlock(&po->bind_lock);
 
-	if (err && !atomic_read(&match->sk_ref)) {
+	if (err && !refcount_read(&match->sk_ref)) {
 		list_del(&match->list);
 		kfree(match);
 	}
@@ -1740,7 +1747,7 @@
 	if (f) {
 		po->fanout = NULL;
 
-		if (atomic_dec_and_test(&f->sk_ref))
+		if (refcount_dec_and_test(&f->sk_ref))
 			list_del(&f->list);
 		else
 			f = NULL;
@@ -2903,13 +2910,15 @@
 	if (skb == NULL)
 		goto out_unlock;
 
-	skb_set_network_header(skb, reserve);
+	skb_reset_network_header(skb);
 
 	err = -EINVAL;
 	if (sock->type == SOCK_DGRAM) {
 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
 		if (unlikely(offset < 0))
 			goto out_free;
+	} else if (reserve) {
+		skb_push(skb, reserve);
 	}
 
 	/* Returns -EFAULT on error */
@@ -3017,6 +3026,7 @@
 
 	packet_flush_mclist(sk);
 
+	lock_sock(sk);
 	if (po->rx_ring.pg_vec) {
 		memset(&req_u, 0, sizeof(req_u));
 		packet_set_ring(sk, &req_u, 1, 0);
@@ -3026,6 +3036,7 @@
 		memset(&req_u, 0, sizeof(req_u));
 		packet_set_ring(sk, &req_u, 1, 1);
 	}
+	release_sock(sk);
 
 	f = fanout_release(sk);
 
@@ -3259,7 +3270,7 @@
 
 	if (proto) {
 		po->prot_hook.type = proto;
-		register_prot_hook(sk);
+		__register_prot_hook(sk);
 	}
 
 	mutex_lock(&net->packet.sklist_lock);
@@ -3654,6 +3665,7 @@
 		union tpacket_req_u req_u;
 		int len;
 
+		lock_sock(sk);
 		switch (po->tp_version) {
 		case TPACKET_V1:
 		case TPACKET_V2:
@@ -3664,12 +3676,17 @@
 			len = sizeof(req_u.req3);
 			break;
 		}
-		if (optlen < len)
-			return -EINVAL;
-		if (copy_from_user(&req_u.req, optval, len))
-			return -EFAULT;
-		return packet_set_ring(sk, &req_u, 0,
-			optname == PACKET_TX_RING);
+		if (optlen < len) {
+			ret = -EINVAL;
+		} else {
+			if (copy_from_user(&req_u.req, optval, len))
+				ret = -EFAULT;
+			else
+				ret = packet_set_ring(sk, &req_u, 0,
+						    optname == PACKET_TX_RING);
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_COPY_THRESH:
 	{
@@ -3735,12 +3752,18 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
-		po->tp_loss = !!val;
-		return 0;
+
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_loss = !!val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_AUXDATA:
 	{
@@ -3751,7 +3774,9 @@
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 
+		lock_sock(sk);
 		po->auxdata = !!val;
+		release_sock(sk);
 		return 0;
 	}
 	case PACKET_ORIGDEV:
@@ -3763,7 +3788,9 @@
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 
+		lock_sock(sk);
 		po->origdev = !!val;
+		release_sock(sk);
 		return 0;
 	}
 	case PACKET_VNET_HDR:
@@ -3772,15 +3799,20 @@
 
 		if (sock->type != SOCK_RAW)
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (optlen < sizeof(val))
 			return -EINVAL;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 
-		po->has_vnet_hdr = !!val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->has_vnet_hdr = !!val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_TIMESTAMP:
 	{
@@ -3818,11 +3850,17 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
-		po->tp_tx_has_off = !!val;
+
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_tx_has_off = !!val;
+			ret = 0;
+		}
+		release_sock(sk);
-		return 0;
+		return ret;
 	}
 	case PACKET_QDISC_BYPASS:
@@ -4219,7 +4257,6 @@
 	/* Added to avoid minimal code churn */
 	struct tpacket_req *req = &req_u->req;
 
-	lock_sock(sk);
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
 		net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4355,7 +4392,6 @@
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
-	release_sock(sk);
 	return err;
 }
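
The sk_ref changes above convert a plain atomic_t into a refcount_t, which saturates and warns on suspicious transitions instead of silently wrapping. Because a fanout group's first reference legitimately starts from the zero written in fanout_add(), it is taken with refcount_set(refcount_read() + 1) under the bind_lock rather than refcount_inc(), which would warn on an increment from zero. A kernel-context sketch of that shape (assumes <linux/refcount.h>; not a standalone program):

#include <linux/refcount.h>
#include <linux/types.h>

/* Kernel-context sketch of the pattern above.  refcount_inc() WARNs on
 * an increment from zero, so the very first reference, taken while the
 * relevant lock is held, is written with refcount_set() instead. */
static void take_ref_locked(refcount_t *r)
{
	refcount_set(r, refcount_read(r) + 1);
}

static bool put_ref(refcount_t *r)
{
	/* true when the last reference is gone and cleanup may proceed */
	return refcount_dec_and_test(r);
}
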
 
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc3..b8d5618 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -1,6 +1,8 @@
 #ifndef __PACKET_INTERNAL_H__
 #define __PACKET_INTERNAL_H__
 
+#include <linux/refcount.h>
+
 struct packet_mclist {
 	struct packet_mclist	*next;
 	int			ifindex;
@@ -86,7 +88,7 @@
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
-	atomic_t		sk_ref;
+	refcount_t		sk_ref;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 
@@ -109,10 +111,12 @@
 	int			copy_thresh;
 	spinlock_t		bind_lock;
 	struct mutex		pg_vec_lock;
-	unsigned int		running:1,	/* prot_hook is attached*/
-				auxdata:1,
+	unsigned int		running;	/* bind_lock must be held */
+	unsigned int		auxdata:1,	/* writer must hold sock lock */
 				origdev:1,
-				has_vnet_hdr:1;
+				has_vnet_hdr:1,
+				tp_loss:1,
+				tp_tx_has_off:1;
 	int			pressure;
 	int			ifindex;	/* bound device		*/
 	__be16			num;
@@ -122,8 +126,6 @@
 	enum tpacket_versions	tp_version;
 	unsigned int		tp_hdrlen;
 	unsigned int		tp_reserve;
-	unsigned int		tp_loss:1;
-	unsigned int		tp_tx_has_off:1;
 	unsigned int		tp_tstamp;
 	struct net_device __rcu	*cached_dev;
 	int			(*xmit)(struct sk_buff *skb);
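
Pulling running out of the bitfield is the heart of the race fix: bits packed into one word are updated with a read-modify-write of the whole word, so two lockless writers touching different bits can overwrite each other's update. After this change, running is written only under bind_lock and the remaining bits only under the socket lock, so each word has a single locking rule. A standalone illustration of the shared storage (the 4-byte size is what common ABIs produce, not something the C standard mandates):

#include <stdio.h>

struct packed_flags {
	unsigned int running:1;
	unsigned int auxdata:1;
	unsigned int origdev:1;
};

/* All three bits share one word, so "f.auxdata = 1" compiles to a
 * load/modify/store of that whole word; two lockless writers updating
 * different bits can therefore lose each other's update.  Giving
 * "running" its own word, as the hunk above does, removes the coupling. */
int main(void)
{
	printf("sizeof(struct packed_flags) = %zu\n", sizeof(struct packed_flags));
	return 0;	/* typically prints 4 */
}
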
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
index 0d11132..ff0112b 100644
--- a/net/qrtr/smd.c
+++ b/net/qrtr/smd.c
@@ -116,5 +116,6 @@
 
 module_qcom_smd_driver(qcom_smd_qrtr_driver);
 
+MODULE_ALIAS("rpmsg:IPCRTR");
 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
 MODULE_LICENSE("GPL v2");
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 5680d90..0efb3d2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -336,7 +336,8 @@
 	/* Create a CMA ID and try to bind it. This catches both
 	 * IB and iWARP capable NICs.
 	 */
-	cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
+	cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
+			       NULL, RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
 
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 76c01cb..d6d8b34 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -138,13 +138,18 @@
 
 	ret = rfkill_register(rfkill->rfkill_dev);
 	if (ret < 0)
-		return ret;
+		goto err_destroy;
 
 	platform_set_drvdata(pdev, rfkill);
 
 	dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
 
 	return 0;
+
+err_destroy:
+	rfkill_destroy(rfkill->rfkill_dev);
+
+	return ret;
 }
 
 static int rfkill_gpio_remove(struct platform_device *pdev)
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index c4ef460..3603c5e 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -578,7 +578,7 @@
-		LOGE("Failed to to register netdev [%s]", dev->name);
+		LOGE("Failed to register netdev [%s]", dev->name);
 		free_netdev(dev);
 		*new_device = 0;
-		rc = RMNET_CONFIG_UNKNOWN_ERROR;
+		return RMNET_CONFIG_UNKNOWN_ERROR;
 	} else {
 		rmnet_devices[id] = dev;
 		*new_device = dev;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1060d14..f3ac85a 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1166,16 +1166,19 @@
 			goto discard_unlock;
 
 		if (sp->hdr.callNumber == chan->last_call) {
-			/* For the previous service call, if completed successfully, we
-			 * discard all further packets.
-			 */
-			if (rxrpc_conn_is_service(conn) &&
-			    (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
-			     sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
+			if (chan->call ||
+			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
 				goto discard_unlock;
 
-			/* But otherwise we need to retransmit the final packet from
-			 * data cached in the connection record.
+			/* For the previous service call, if completed
+			 * successfully, we discard all further packets.
+			 */
+			if (rxrpc_conn_is_service(conn) &&
+			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
+				goto discard_unlock;
+
+			/* But otherwise we need to retransmit the final packet
+			 * from data cached in the connection record.
 			 */
 			rxrpc_post_packet_to_conn(conn, skb);
 			goto out_unlock;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index c29362d..3e52b7fd 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -493,9 +493,10 @@
 			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
 				       sizeof(unsigned int), &id32);
 		} else {
+			unsigned long idl = call->user_call_ID;
+
 			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
-				       sizeof(unsigned long),
-				       &call->user_call_ID);
+				       sizeof(unsigned long), &idl);
 		}
 		if (ret < 0)
 			goto error;
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index b214a4d..1de27c3 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -78,7 +78,9 @@
 	spin_lock_bh(&call->lock);
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
-		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
+		call->rxtx_annotations[ix] =
+			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
+			RXRPC_TX_ANNO_RETRANS;
 		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
 			rxrpc_queue_call(call);
 	}
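
The fix keeps the RXRPC_TX_ANNO_LAST flag intact while switching the annotation to the retransmit state; overwriting the whole annotation, as the old line did, silently dropped the last-packet marker. A small mask-and-set sketch with illustrative bit values (not the kernel's actual constants):

#include <stdio.h>

#define ANNO_LAST	0x08	/* illustrative values, not the kernel's */
#define ANNO_RETRANS	0x03

int main(void)
{
	unsigned char anno = ANNO_LAST | 0x01;	/* last packet, some state */

	/* Old code: anno = ANNO_RETRANS;  -- this drops ANNO_LAST.
	 * The fixed code keeps the LAST bit and replaces only the state: */
	anno = (anno & ANNO_LAST) | ANNO_RETRANS;

	printf("0x%02x\n", anno);		/* 0x0b: LAST preserved */
	return 0;
}
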
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 95c463c..235db2c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -634,7 +634,7 @@
 		}
 	}
 
-	return 0;
+	return -ENOENT;
 }
 
 struct ifeheadr {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 18e7524..b57b4de 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@
 				     f->socket_hash != sk->sk_hash)) {
 				f->credit = q->initial_quantum;
 				f->socket_hash = sk->sk_hash;
+				if (fq_flow_is_throttled(f))
+					fq_flow_unset_throttled(q, f);
 				f->time_next_packet = 0ULL;
 			}
 			return f;
@@ -430,9 +445,7 @@
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f10d339..738c55e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1006,9 +1006,10 @@
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
 	struct sctp_inq *inqueue;
-	int state;
 	sctp_subtype_t subtype;
+	int first_time = 1;	/* is this the first time through the loop */
 	int error = 0;
+	int state;
 
 	/* The association should be held so we should be safe. */
 	ep = asoc->ep;
@@ -1019,6 +1020,30 @@
 		state = asoc->state;
 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
 
+		/* If the first chunk in the packet is AUTH, do special
+		 * processing specified in Section 6.3 of SCTP-AUTH spec
+		 */
+		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+			struct sctp_chunkhdr *next_hdr;
+
+			next_hdr = sctp_inq_peek(inqueue);
+			if (!next_hdr)
+				goto normal;
+
+			/* If the next chunk is COOKIE-ECHO, skip the AUTH
+			 * chunk while saving a pointer to it so we can do
+			 * Authentication later (during cookie-echo
+			 * processing).
+			 */
+			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+				chunk->auth_chunk = skb_clone(chunk->skb,
+							      GFP_ATOMIC);
+				chunk->auth = 1;
+				continue;
+			}
+		}
+
+normal:
 		/* SCTP-AUTH, Section 6.3:
 		 *    The receiver has a list of chunk types which it expects
 		 *    to be received only after an AUTH-chunk.  This list has
@@ -1057,6 +1082,9 @@
 		/* If there is an error on chunk, discard this packet. */
 		if (error && chunk)
 			chunk->pdiscard = 1;
+
+		if (first_time)
+			first_time = 0;
 	}
 	sctp_association_put(asoc);
 }
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f731de3..e06083c 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@
 	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
 	chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
 	    skb_tail_pointer(chunk->skb)) {
 		/* This is not a singleton */
 		chunk->singleton = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 355d95a7..f4d5efb 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -521,44 +521,47 @@
 	addr->v6.sin6_scope_id = 0;
 }
 
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+			      const union sctp_addr *addr2)
+{
+	if (addr1->sa.sa_family != addr2->sa.sa_family) {
+		if (addr1->sa.sa_family == AF_INET &&
+		    addr2->sa.sa_family == AF_INET6 &&
+		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+		    addr2->v6.sin6_addr.s6_addr32[3] ==
+		    addr1->v4.sin_addr.s_addr)
+			return 1;
+
+		if (addr2->sa.sa_family == AF_INET &&
+		    addr1->sa.sa_family == AF_INET6 &&
+		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+		    addr1->v6.sin6_addr.s6_addr32[3] ==
+		    addr2->v4.sin_addr.s_addr)
+			return 1;
+
+		return 0;
+	}
+
+	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+		return 0;
+
+	/* If this is a linklocal address, compare the scope_id. */
+	if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+	    addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+	    addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+		return 0;
+
+	return 1;
+}
+
 /* Compare addresses exactly.
  * v4-mapped-v6 is also in consideration.
  */
 static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
 			    const union sctp_addr *addr2)
 {
-	if (addr1->sa.sa_family != addr2->sa.sa_family) {
-		if (addr1->sa.sa_family == AF_INET &&
-		    addr2->sa.sa_family == AF_INET6 &&
-		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
-			if (addr2->v6.sin6_port == addr1->v4.sin_port &&
-			    addr2->v6.sin6_addr.s6_addr32[3] ==
-			    addr1->v4.sin_addr.s_addr)
-				return 1;
-		}
-		if (addr2->sa.sa_family == AF_INET &&
-		    addr1->sa.sa_family == AF_INET6 &&
-		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
-			if (addr1->v6.sin6_port == addr2->v4.sin_port &&
-			    addr1->v6.sin6_addr.s6_addr32[3] ==
-			    addr2->v4.sin_addr.s_addr)
-				return 1;
-		}
-		return 0;
-	}
-	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
-		return 0;
-	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
-		return 0;
-	/* If this is a linklocal address, compare the scope_id. */
-	if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-		if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
-		    (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
-			return 0;
-		}
-	}
-
-	return 1;
+	return __sctp_v6_cmp_addr(addr1, addr2) &&
+	       addr1->v6.sin6_port == addr2->v6.sin6_port;
 }
 
 /* Initialize addr struct to INADDR_ANY. */
@@ -844,8 +847,8 @@
 			       const union sctp_addr *addr2,
 			       struct sctp_sock *opt)
 {
-	struct sctp_af *af1, *af2;
 	struct sock *sk = sctp_opt2sk(opt);
+	struct sctp_af *af1, *af2;
 
 	af1 = sctp_get_af_specific(addr1->sa.sa_family);
 	af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -861,10 +864,10 @@
 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
 		return 1;
 
-	if (addr1->sa.sa_family != addr2->sa.sa_family)
-		return 0;
+	if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+		return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
 
-	return af1->cmp_addr(addr1, addr2);
+	return __sctp_v6_cmp_addr(addr1, addr2);
 }
 
 /* Verify that the provided sockaddr looks bindable.   Common verification,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8ec20a6..bfd0686 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -144,10 +144,8 @@
 				     void *arg,
 				     sctp_cmd_seq_t *commands);
 
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
-				    const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
 				    const struct sctp_association *asoc,
-				    const sctp_subtype_t type,
 				    struct sctp_chunk *chunk);
 
 static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
@@ -615,6 +613,38 @@
 	return SCTP_DISPOSITION_CONSUME;
 }
 
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+				   const struct sctp_association *asoc)
+{
+	struct sctp_chunk auth;
+
+	if (!chunk->auth_chunk)
+		return true;
+
+	/* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
+	 * is supposed to be authenticated and we have to do delayed
+	 * authentication.  We've just recreated the association using
+	 * the information in the cookie and now it's much easier to
+	 * do the authentication.
+	 */
+
+	/* Make sure that we and the peer are AUTH capable */
+	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+		return false;
+
+	/* set-up our fake chunk so that we can process it */
+	auth.skb = chunk->auth_chunk;
+	auth.asoc = chunk->asoc;
+	auth.sctp_hdr = chunk->sctp_hdr;
+	auth.chunk_hdr = (struct sctp_chunkhdr *)
+				skb_push(chunk->auth_chunk,
+					 sizeof(struct sctp_chunkhdr));
+	skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+	auth.transport = chunk->transport;
+
+	return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
 /*
  * Respond to a normal COOKIE ECHO chunk.
  * We are the side that is being asked for an association.
@@ -751,36 +781,9 @@
 	if (error)
 		goto nomem_init;
 
-	/* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
-	 * is supposed to be authenticated and we have to do delayed
-	 * authentication.  We've just recreated the association using
-	 * the information in the cookie and now it's much easier to
-	 * do the authentication.
-	 */
-	if (chunk->auth_chunk) {
-		struct sctp_chunk auth;
-		sctp_ierror_t ret;
-
-		/* Make sure that we and the peer are AUTH capable */
-		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
-			sctp_association_free(new_asoc);
-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-		}
-
-		/* set-up our fake chunk so that we can process it */
-		auth.skb = chunk->auth_chunk;
-		auth.asoc = chunk->asoc;
-		auth.sctp_hdr = chunk->sctp_hdr;
-		auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
-					    sizeof(sctp_chunkhdr_t));
-		skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
-		auth.transport = chunk->transport;
-
-		ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-		if (ret != SCTP_IERROR_NO_ERROR) {
-			sctp_association_free(new_asoc);
-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-		}
+	if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+		sctp_association_free(new_asoc);
+		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
 	repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1717,13 +1720,15 @@
 			       GFP_ATOMIC))
 		goto nomem;
 
+	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+		return SCTP_DISPOSITION_DISCARD;
+
 	/* Make sure no new addresses are being added during the
 	 * restart.  Though this is a pretty complicated attack
 	 * since you'd have to get inside the cookie.
 	 */
-	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
 		return SCTP_DISPOSITION_CONSUME;
-	}
 
 	/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
 	 * the peer has restarted (Action A), it MUST NOT setup a new
@@ -1828,6 +1833,9 @@
 			       GFP_ATOMIC))
 		goto nomem;
 
+	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+		return SCTP_DISPOSITION_DISCARD;
+
 	/* Update the content of current association.  */
 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1920,6 +1928,9 @@
 	 * a COOKIE ACK.
 	 */
 
+	if (!sctp_auth_chunk_verify(net, chunk, asoc))
+		return SCTP_DISPOSITION_DISCARD;
+
 	/* Don't accidentally move back into established state. */
 	if (asoc->state < SCTP_STATE_ESTABLISHED) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -1959,7 +1970,7 @@
 		}
 	}
 
-	repl = sctp_make_cookie_ack(new_asoc, chunk);
+	repl = sctp_make_cookie_ack(asoc, chunk);
 	if (!repl)
 		goto nomem;
 
@@ -3981,10 +3992,8 @@
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
-				    const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
 				    const struct sctp_association *asoc,
-				    const sctp_subtype_t type,
 				    struct sctp_chunk *chunk)
 {
 	struct sctp_authhdr *auth_hdr;
@@ -4083,7 +4092,7 @@
 						  commands);
 
 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-	error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+	error = sctp_sf_authenticate(asoc, chunk);
 	switch (error) {
 	case SCTP_IERROR_AUTH_BAD_HMAC:
 		/* Generate the ERROR chunk and discard the rest
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index bea0005..6825e05 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -723,7 +723,6 @@
 	return event;
 
 fail_mark:
-	sctp_chunk_put(chunk);
 	kfree_skb(skb);
 fail:
 	return NULL;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 6cbc935..bbee334 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -285,9 +285,9 @@
 					strp_start_rx_timer(strp);
 				}
 
+				rxm->accum_len += cand_len;
 				strp->rx_need_bytes = rxm->strp.full_len -
 						       rxm->accum_len;
-				rxm->accum_len += cand_len;
 				rxm->early_eaten = cand_len;
 				STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
 				desc->count = 0; /* Stop reading socket */
@@ -310,6 +310,7 @@
 		/* Hurray, we have a new message! */
 		del_timer(&strp->rx_msg_timer);
 		strp->rx_skb_head = NULL;
+		strp->rx_need_bytes = 0;
 		STRP_STATS_INCR(strp->stats.rx_msgs);
 
 		/* Give skb to upper layer */
@@ -374,9 +375,7 @@
 		return;
 
 	if (strp->rx_need_bytes) {
-		if (strp_peek_len(strp) >= strp->rx_need_bytes)
-			strp->rx_need_bytes = 0;
-		else
+		if (strp_peek_len(strp) < strp->rx_need_bytes)
 			return;
 	}
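
The reordering matters because rx_need_bytes is derived from accum_len: if cand_len is added to accum_len only after the subtraction, rx_need_bytes is too large by exactly cand_len and the parser keeps waiting for bytes it has already consumed. A worked example with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int full_len = 1000, accum_len = 200, cand_len = 300;

	/* Old order: the need is computed before the new bytes are counted. */
	unsigned int need_old = full_len - accum_len;	/* 800: overcounts by cand_len */

	/* Fixed order, as in the hunk above: count cand_len first. */
	accum_len += cand_len;
	unsigned int need_new = full_len - accum_len;	/* 500: correct */

	printf("old need %u, fixed need %u\n", need_old, need_new);
	return 0;
}
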
 
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 3200059..9ba3c46 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -79,7 +79,8 @@
 
 const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
 	[TIPC_NLA_NET_UNSPEC]		= { .type = NLA_UNSPEC },
-	[TIPC_NLA_NET_ID]		= { .type = NLA_U32 }
+	[TIPC_NLA_NET_ID]		= { .type = NLA_U32 },
+	[TIPC_NLA_NET_ADDR]		= { .type = NLA_U32 },
 };
 
 const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3181b07..c88874f 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -95,6 +95,9 @@
 
 	ASSERT_RTNL();
 
+	if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
+		return -EINVAL;
+
 	/* prohibit calling the thing phy%d when %d is not its number */
 	sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
 	if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 30cc249..f900f5c 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -37,10 +37,10 @@
 
 country AL: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
-	(5150 - 5250 @ 80), (23), AUTO-BW
-	(5250 - 5350 @ 80), (23), DFS, AUTO-BW
-	(5470 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country AM: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -51,8 +51,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country AR:
 	(2402 - 2482 @ 40), (36)
@@ -75,8 +75,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -105,8 +105,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country BB: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -122,8 +122,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -138,8 +138,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -228,8 +228,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -282,8 +282,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -293,8 +293,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -308,8 +308,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -317,8 +317,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -353,8 +353,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -367,8 +367,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -382,8 +382,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -398,8 +398,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -407,8 +407,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -428,8 +428,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country GH: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -460,8 +460,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -505,8 +505,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -521,8 +521,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -535,8 +535,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -562,8 +562,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -571,8 +571,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -662,8 +662,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -684,8 +684,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -693,8 +693,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -702,8 +702,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -718,22 +718,22 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MD: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country ME: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MF: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -752,8 +752,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -780,8 +780,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MR: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -793,17 +793,17 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
-country MU: DFS-FCC
+country MU: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
-	(5170 - 5250 @ 80), (24), AUTO-BW
-	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
-	(5490 - 5730 @ 160), (24), DFS
-	(5735 - 5835 @ 80), (30)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country MV: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -860,8 +860,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -869,8 +869,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -911,8 +911,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country PG: DFS-FCC
 	(2402 - 2482 @ 40), (20)
@@ -938,8 +938,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -947,8 +947,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country PR: DFS-FCC
 	(2402 - 2472 @ 40), (30)
@@ -968,8 +968,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1006,8 +1006,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1017,8 +1017,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
         (5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1047,8 +1047,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1065,8 +1065,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1074,8 +1074,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 	# 60 gHz band channels 1-4, ref: Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
@@ -1134,8 +1134,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country TT:
 	(2402 - 2482 @ 40), (20)
@@ -1209,8 +1209,8 @@
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
 	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
-	(5490 - 5710 @ 160), (30), DFS
-	(5725 - 5875 @ 80), (14)
+	(5490 - 5730 @ 160), (30), DFS
+	(5735 - 5875 @ 80), (14)
 
 country VE: DFS-FCC
 	(2402 - 2482 @ 40), (20)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 976cba3..f00e7d3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14868,7 +14868,8 @@
 	if (!ft_event->target_ap)
 		return;
 
-	msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
+	msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
+			GFP_KERNEL);
 	if (!msg)
 		return;
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d869b1d..3fbe584 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1197,6 +1197,7 @@
 
 	if (orig->aead) {
 		x->aead = xfrm_algo_aead_clone(orig->aead);
+		x->geniv = orig->geniv;
 		if (!x->aead)
 			goto error;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 35e60d3..773c66b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1724,10 +1724,6 @@
 	struct sk_buff *skb;
 	int err;
 
-	err = verify_policy_dir(dir);
-	if (err)
-		return ERR_PTR(err);
-
 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
@@ -2249,10 +2245,6 @@
 	int n = 0;
 	struct net *net = sock_net(skb->sk);
 
-	err = verify_policy_dir(pi->dir);
-	if (err)
-		return err;
-
 	if (attrs[XFRMA_MIGRATE] == NULL)
 		return -EINVAL;
 
@@ -2368,11 +2360,6 @@
 {
 	struct net *net = &init_net;
 	struct sk_buff *skb;
-	int err;
-
-	err = verify_policy_dir(dir);
-	if (err)
-		return err;
 
 	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
 	if (skb == NULL)
@@ -3033,11 +3020,6 @@
 
 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
-	int err;
-
-	err = verify_policy_dir(dir);
-	if (err)
-		return err;
 
 	switch (c->event) {
 	case XFRM_MSG_NEWPOLICY:
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
index 8dc1918..564db35 100755
--- a/scripts/adjust_autoksyms.sh
+++ b/scripts/adjust_autoksyms.sh
@@ -83,6 +83,13 @@
 	depfile="include/config/ksym/${sympath}.h"
 	mkdir -p "$(dirname "$depfile")"
 	touch "$depfile"
+	# Filesystems with coarse time precision may create timestamps
+	# equal to the one from a file that was very recently built and that
+	# needs to be rebuilt. Let's guard against that by making sure our
+	# dep files are always newer than the first file we created here.
+	while [ ! "$depfile" -nt "$new_ksyms_file" ]; do
+		touch "$depfile"
+	done
 	echo $((count += 1))
 done | tail -1 )
 changed=${changed:-0}
diff --git a/scripts/build-all.py b/scripts/build-all.py
index bd468cd..4a60ebc 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -59,12 +59,8 @@
 
 def check_kernel():
     """Ensure that PWD is a kernel directory"""
-    have_defconfig = any([
-        os.path.isfile('arch/arm64/configs/msm_defconfig'),
-        os.path.isfile('arch/arm64/configs/sdm845_defconfig')])
-
-    if not all([os.path.isfile('MAINTAINERS'), have_defconfig]):
-        fail("This doesn't seem to be an MSM kernel dir")
+    if not os.path.isfile('MAINTAINERS'):
+        fail("This doesn't seem to be a kernel dir")
 
 def check_build():
     """Ensure that the build directory is present."""
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index cdfa754..0534378 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3012,6 +3012,10 @@
 			} elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/) {
 				$msg_type = "";
 
+			# Long copyright statements are another special case
+			} elsif ($rawline =~ /^\+.\*.*copyright.*\(c\).*$/i) {
+				$msg_type = "";
+
 			# Otherwise set the alternate message types
 
 			# a comment starts before $max_line_length
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996..ed29bad 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -113,7 +113,7 @@
 		break;
 	case E_NOT:
 		expr_free(e->left.expr);
-		return;
+		break;
 	case E_EQUAL:
 	case E_GEQ:
 	case E_GTH:
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index aed678e..4a61636 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -364,6 +364,7 @@
 			menu->parent = parent;
 			last_menu = menu;
 		}
+		expr_free(basedep);
 		if (last_menu) {
 			parent->list = parent->next;
 			parent->next = last_menu->next;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 71bf8bf..5122ed2 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -107,7 +107,27 @@
 %%
 input: nl start | start;
 
-start: mainmenu_stmt stmt_list | stmt_list;
+start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
+
+/* mainmenu entry */
+
+mainmenu_stmt: T_MAINMENU prompt nl
+{
+	menu_add_prompt(P_MENU, $2, NULL);
+};
+
+/* Default main menu, if there's no mainmenu entry */
+
+no_mainmenu_stmt: /* empty */
+{
+	/*
+	 * Hack: Keep the main menu title on the heap so we can safely free it
+	 * later regardless of whether it comes from the 'prompt' in
+	 * mainmenu_stmt or here
+	 */
+	menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
+};
+
 
 stmt_list:
 	  /* empty */
@@ -344,13 +364,6 @@
 	| if_block choice_stmt
 ;
 
-/* mainmenu entry */
-
-mainmenu_stmt: T_MAINMENU prompt nl
-{
-	menu_add_prompt(P_MENU, $2, NULL);
-};
-
 /* menu entry */
 
 menu: T_MENU prompt T_EOL
@@ -495,6 +508,7 @@
 
 void conf_parse(const char *name)
 {
+	const char *tmp;
 	struct symbol *sym;
 	int i;
 
@@ -502,7 +516,6 @@
 
 	sym_init();
 	_menu_init();
-	rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
 
 	if (getenv("ZCONF_DEBUG"))
 		zconfdebug = 1;
@@ -512,8 +525,10 @@
 	if (!modules_sym)
 		modules_sym = sym_find( "n" );
 
+	tmp = rootmenu.prompt->text;
 	rootmenu.prompt->text = _(rootmenu.prompt->text);
 	rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
+	free((char*)tmp);
 
 	menu_finalize(&rootmenu);
 	for_all_symbols(i, sym) {
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 3c575cd0..0a2a737 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -325,7 +325,7 @@
 
 # Build kernel header package
 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
 if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
diff --git a/security/Kconfig b/security/Kconfig
index 2e68fa4..638afc8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,11 @@
 
 source security/keys/Kconfig
 
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
 	default n
diff --git a/security/Makefile b/security/Makefile
index f2d71cd..79166ba 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@
 subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
 subdir-$(CONFIG_SECURITY_YAMA)		+= yama
 subdir-$(CONFIG_SECURITY_LOADPIN)	+= loadpin
+subdir-$(CONFIG_ARCH_QCOM)	+= pfe
 
 # always enable default capabilities
 obj-y					+= commoncap.o
@@ -24,6 +25,7 @@
 obj-$(CONFIG_SECURITY_APPARMOR)		+= apparmor/
 obj-$(CONFIG_SECURITY_YAMA)		+= yama/
 obj-$(CONFIG_SECURITY_LOADPIN)		+= loadpin/
+obj-$(CONFIG_ARCH_QCOM)				+= pfe/
 obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
 
 # Object integrity file lists
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 4304372..95433ac 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -18,6 +18,7 @@
 #include <linux/cred.h>
 #include <linux/key-type.h>
 #include <linux/digsig.h>
+#include <linux/vmalloc.h>
 #include <crypto/public_key.h>
 #include <keys/system_keyring.h>
 
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 38f2ed8..93f0917 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -78,6 +78,8 @@
 		       hash_algo_name[ima_hash_algo], rc);
 		return rc;
 	}
+	pr_info("Allocated hash algorithm: %s\n",
+		hash_algo_name[ima_hash_algo]);
 	return 0;
 }
 
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2b3def1..a71f906 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -16,6 +16,9 @@
  *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
  *	and ima_file_check.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/binfmts.h>
@@ -426,6 +429,16 @@
 
 	hash_setup(CONFIG_IMA_DEFAULT_HASH);
 	error = ima_init();
+
+	if (error && strcmp(hash_algo_name[ima_hash_algo],
+			    CONFIG_IMA_DEFAULT_HASH) != 0) {
+		pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
+			hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
+		hash_setup_done = 0;
+		hash_setup(CONFIG_IMA_DEFAULT_HASH);
+		error = ima_init();
+	}
+
 	if (!error) {
 		ima_initialized = 1;
 		ima_update_policy_flag();
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..0cd9e81
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+	depends on ARCH_QCOM
+
+config PFT
+	bool "Per-File-Tagger driver"
+	depends on SECURITY
+	default n
+	help
+		This driver is used for tagging enterprise files.
+		It is part of the Per-File-Encryption (PFE) feature.
+		The driver tags files when they are created by a
+		registered application.
+		Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+	bool "Per-File-Key driver"
+	depends on SECURITY
+	depends on SECURITY_SELINUX
+	default n
+	help
+		This driver is used for storing eCryptfs information
+		in the file node.
+		It is part of the eCryptfs hardware-enhanced solution
+		provided by Qualcomm Technologies, Inc.
+		The information is used when the file is later encrypted
+		using the ICE or dm crypto engine.
+
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..4096aad
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+#ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/crypto
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_f2fs.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..740da32
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purposes:
+ * 1) Define priorities between PFEs if more than one is enabled
+ * 2) Extract key information from the inode
+ * 3) Load and manage various keys in the ICE HW engine
+ * 4) Be invoked from various layers in FS/BLOCK/STORAGE DRIVER that
+ *    need to make decisions about HW encryption management of the data.
+ *    Some examples:
+ *	BLOCK LAYER: when it decides whether two chunks can be merged into
+ *	one encryption / decryption request sent to the HW
+ *
+ *	UFS DRIVER: when it needs to configure the ICE HW with a particular
+ *	key slot to be used for encryption / decryption
+ *
+ * PFE variants can differ in the particular way of storing the cryptographic
+ * info inside the inode, the actions to be taken upon file operations, etc.,
+ * but the common properties are described above.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt)	"pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_f2fs.h"
+#include "pfk_internal.h"
+//#include "ext4.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+	/* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
+	/* F2FS_CRYPT_PFE */ &pfk_f2fs_parse_inode,
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+	/* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
+	/* F2FS_CRYPT_PFE */ &pfk_f2fs_allow_merge_bio,
+};
+
+static void __exit pfk_exit(void)
+{
+	pfk_ready = false;
+	pfk_ext4_deinit();
+	pfk_f2fs_deinit();
+	pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+
+	int ret = 0;
+
+	ret = pfk_ext4_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_f2fs_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_kc_init();
+	if (ret != 0) {
+		pr_err("could not init pfk key cache, error %d\n", ret);
+		pfk_ext4_deinit();
+		pfk_f2fs_deinit();
+		goto fail;
+	}
+
+	pfk_ready = true;
+	pr_info("Driver initialized successfully\n");
+
+	return 0;
+
+fail:
+	pr_err("Failed to init driver\n");
+	return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+	if (!inode)
+		return INVALID_PFE;
+
+	if (pfk_is_ext4_type(inode))
+		return EXT4_CRYPT_PFE;
+
+	if (pfk_is_f2fs_type(inode))
+		return F2FS_CRYPT_PFE;
+
+	return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * It is used for debug prints.
+ *
+ * Return: filename string, or "NULL"/"unknown" when it cannot be resolved.
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (!inode)
+		return "NULL";
+
+	if (hlist_empty(&inode->i_dentry))
+		return "unknown";
+
+	dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+	return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note that, in general, a bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for the inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+	if (!bio)
+		return NULL;
+	if (!bio_has_data((struct bio *)bio))
+		return NULL;
+	if (!bio->bi_io_vec)
+		return NULL;
+	if (!bio->bi_io_vec->bv_page)
+		return NULL;
+
+	if (PageAnon(bio->bi_io_vec->bv_page)) {
+		struct inode *inode;
+
+		/* Using direct-io (O_DIRECT) without page cache */
+		inode = dio_bio_get_inode((struct bio *)bio);
+		pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+		return inode;
+	}
+
+	if (!page_mapping(bio->bi_io_vec->bv_page))
+		return NULL;
+
+	if (!bio->bi_io_vec->bv_page->mapping->host)
+		return NULL;
+
+	return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. unsupported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type)
+{
+	/*
+	 *  currently only 32 bit key size is supported
+	 *  in the future, table with supported key sizes might
+	 *  be introduced
+	 */
+
+	if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+		pr_err("unsupported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (key_size_type)
+		*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+	return 0;
+}
+
+/*
+ * Check whether the inode's filesystem type (from its superblock) matches fs_type
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type)
+{
+	if (!inode || !fs_type)
+		return false;
+
+	if (!inode->i_sb)
+		return false;
+
+	if (!inode->i_sb->s_type)
+		return false;
+
+	return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+/**
+ * pfk_get_key_for_bio() - get the encryption key to be used for a bio
+ *
+ * @bio: pointer to the BIO
+ * @key_info: pointer to the key information which will be filled in
+ * @algo_mode: optional pointer to the algorithm identifier which will be set
+ * @is_pfe: will be set to false if the BIO should be left unencrypted
+ *
+ * Return: 0 if a key is being used, otherwise a -errno value
+ */
+static int pfk_get_key_for_bio(const struct bio *bio,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo_mode,
+		bool *is_pfe)
+{
+	const struct inode *inode;
+	enum pfe_type which_pfe;
+	const struct blk_encryption_key *key;
+
+	inode = pfk_bio_get_inode(bio);
+	which_pfe = pfk_get_pfe_type(inode);
+
+	if (which_pfe != INVALID_PFE) {
+		/* Encrypted file; override ->bi_crypt_key */
+		pr_debug("parsing inode %lu with PFE type %d\n",
+			 inode->i_ino, which_pfe);
+		return (*(pfk_parse_inode_ftable[which_pfe]))
+				(bio, inode, key_info, algo_mode, is_pfe);
+	}
+
+	/*
+	 * bio is not for an encrypted file.  Use ->bi_crypt_key if it was set.
+	 * Otherwise, don't encrypt/decrypt the bio.
+	 */
+	key = bio->bi_crypt_key;
+	if (!key) {
+		*is_pfe = false;
+		return -EINVAL;
+	}
+
+	/* Note: the "salt" is really just the second half of the XTS key. */
+	BUILD_BUG_ON(sizeof(key->raw) !=
+		     PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
+	key_info->key = &key->raw[0];
+	key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
+	key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
+	key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
+	if (algo_mode)
+		*algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	return 0;
+}
+
+
+/**
+ * pfk_load_key_start() - loads the PFE encryption key to the ICE.
+ *			  Can also be invoked from a non-PFE
+ *			  context; in that case the call is not
+ *			  relevant and the is_pfe flag is set
+ *			  to false.
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to the ICE settings structure that will be filled
+ * with the ICE configuration values, including the index to which the key
+ * was loaded
+ * @is_pfe: will be false if the inode is not relevant to PFE; in such a case
+ * the bio should be treated as non-PFE by the block layer
+ *
+ * Returns the index where the key is stored in the encryption HW and
+ * additional information that will be used later for configuration of the
+ * encryption HW.
+ *
+ * Must be followed by pfk_load_key_end() when the key is no longer used by
+ * the ICE.
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe,
+		bool async)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+	enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	enum ice_crpto_key_size key_size_type = 0;
+	u32 key_index = 0;
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	if (!ice_setting) {
+		pr_err("ice setting is NULL\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe);
+
+	if (ret != 0)
+		return ret;
+
+	ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+	if (ret != 0)
+		return ret;
+
+	ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+			key_info.salt, key_info.salt_size, &key_index, async);
+	if (ret) {
+		if (ret != -EBUSY && ret != -EAGAIN)
+			pr_err("start: could not load key into pfk key cache, error %d\n",
+					ret);
+
+		return ret;
+	}
+
+	ice_setting->key_size = key_size_type;
+	ice_setting->algo_mode = algo_mode;
+	/* hardcoded for now */
+	ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+	ice_setting->key_index = key_index;
+
+	pr_debug("loaded key for file %s key_index %d\n",
+		inode_to_filename(pfk_bio_get_inode(bio)), key_index);
+
+	return 0;
+}
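+
+/*
+ * Minimal illustrative sketch of a caller (a storage driver). Only the pfk_*
+ * calls and the error codes are taken from this file; the surrounding driver
+ * logic is hypothetical:
+ *
+ *	struct ice_crypto_setting setting;
+ *	bool is_pfe = false;
+ *	int err = pfk_load_key_start(bio, &setting, &is_pfe, false);
+ *
+ *	if (err == -EAGAIN || err == -EBUSY) {
+ *		// requeue and retry later from a non-atomic context
+ *	} else if (!err && is_pfe) {
+ *		// program the ICE with setting.key_index, then submit the I/O
+ *	}
+ *	// ... once the I/O has completed:
+ *	pfk_load_key_end(bio, &is_pfe);
+ */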
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by the ICE.
+ *			Can also be invoked from a non-PFE
+ *			context; in that case the call is not
+ *			relevant and the is_pfe flag is set
+ *			to false.
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to the is_pfe flag, which will be true if the function was
+ *			invoked from a PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/* only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe);
+	if (ret != 0)
+		return ret;
+
+	pfk_kc_load_key_end(key_info.key, key_info.key_size,
+		key_info.salt, key_info.salt_size);
+
+	pr_debug("finished using key for file %s\n",
+		inode_to_filename(pfk_bio_get_inode(bio)));
+
+	return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1:	Pointer to first BIO structure.
+ * @bio2:	Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or from files encrypted with different keys.
+ * Also prevent encrypted and non-encrypted data from the same file
+ * from being merged (an ecryptfs header, if stored inside the file,
+ * should stay non-encrypted).
+ * This API is called by the block layer.
+ *
+ * Return: true if the BIOs are allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+	const struct blk_encryption_key *key1;
+	const struct blk_encryption_key *key2;
+	const struct inode *inode1;
+	const struct inode *inode2;
+	enum pfe_type which_pfe1;
+	enum pfe_type which_pfe2;
+
+	if (!pfk_is_ready())
+		return false;
+
+	if (!bio1 || !bio2)
+		return false;
+
+	if (bio1 == bio2)
+		return true;
+
+	key1 = bio1->bi_crypt_key;
+	key2 = bio2->bi_crypt_key;
+
+	inode1 = pfk_bio_get_inode(bio1);
+	inode2 = pfk_bio_get_inode(bio2);
+
+	which_pfe1 = pfk_get_pfe_type(inode1);
+	which_pfe2 = pfk_get_pfe_type(inode2);
+
+	/*
+	 * If one bio is for an encrypted file and the other is for a different
+	 * type of encrypted file or for blocks that are not part of an
+	 * encrypted file, do not merge.
+	 */
+	if (which_pfe1 != which_pfe2)
+		return false;
+
+	if (which_pfe1 != INVALID_PFE) {
+		/* Both bios are for the same type of encrypted file. */
+		return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+				inode1, inode2);
+	}
+
+	/*
+	 * Neither bio is for an encrypted file.  Merge only if the default keys
+	 * are the same (or both are NULL).
+	 */
+	return key1 == key2 ||
+		(key1 && key2 &&
+		 !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
+}
+
+/**
+ * Flush key table on storage core reset. During core reset the key
+ * configuration is lost in the ICE. We need to flush the cache, so that the
+ * keys will be reconfigured for every subsequent transaction.
+ */
+void pfk_clear_on_reset(void)
+{
+	if (!pfk_is_ready())
+		return;
+
+	pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..0eb1225
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with the EXT4 crypt extension.
+ *
+ * The key information is stored in the inode by EXT4 when the file is first
+ * opened and will later be accessed by the block device driver to actually
+ * load the key into the encryption HW.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption HW,
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the block layer into one request to the encryption HW.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt)	"pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_ext4.h"
+//#include "ext4_ice.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+	pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+	pfk_ext4_ready = true;
+	pr_info("PFK EXT4 inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+	return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ *	struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ *	pr_debug("dumping inode with address 0x%p\n", inode);
+ *	pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ *	pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ *		ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ *	if (ci) {
+ *		pr_debug("crypt_info address 0x%p\n", ci);
+ *		pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ *	} else {
+ *		pr_debug("crypt_info is NULL\n");
+ *	}
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. unsupported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+	enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only AES XTS algo is supported
+	 * in the future, table with supported ciphers might
+	 * be introduced
+	 */
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 algorithm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_ext4_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = fscrypt_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from ext4\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_ext4_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2)
+{
+	/* if there is no ext4 pfk, don't disallow merging blocks */
+	if (!pfk_ext4_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+}
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..c33232f
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
new file mode 100644
index 0000000..8b9d515
--- /dev/null
+++ b/security/pfe/pfk_f2fs.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - f2fs
+ *
+ * This driver is used for working with the F2FS crypt extension.
+ *
+ * The key information is stored in the inode by F2FS when the file is first
+ * opened and will later be accessed by the block device driver to actually
+ * load the key into the encryption HW.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption HW,
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the block layer into one request to the encryption HW.
+ *
+ */
+
+
+/* Comment out the line below to disable debug messages */
+#define DEBUG 1
+#define pr_fmt(fmt)	"pfk_f2fs [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_f2fs.h"
+
+static bool pfk_f2fs_ready;
+
+/*
+ * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_f2fs_deinit(void)
+{
+	pfk_f2fs_ready = false;
+}
+
+/*
+ * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_f2fs_init(void)
+{
+	pfk_f2fs_ready = true;
+	pr_info("PFK F2FS inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_f2fs_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_f2fs_is_ready(void)
+{
+	return pfk_f2fs_ready;
+}
+
+/**
+ * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_f2fs_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "f2fs"))
+		return false;
+
+	return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_f2fs_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e. unsupported cipher)
+ */
+static int pfk_f2fs_parse_cipher(const struct inode *inode,
+		enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only AES XTS algo is supported
+	 * in the future, table with supported ciphers might
+	 * be introduced
+	 */
+	if (!inode)
+		return -EINVAL;
+
+	if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("f2fs algorithm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+		const struct inode *inode,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo,
+		bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_f2fs_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = fscrypt_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from f2fs\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from f2fs\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_f2fs_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2, const struct inode *inode1,
+		const struct inode *inode2)
+{
+	bool mergeable;
+
+	/* if there is no f2fs pfk, don't disallow merging blocks */
+	if (!pfk_f2fs_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+	if (!mergeable)
+		return false;
+
+	/* ICE allows only consecutive iv_key stream. */
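+	/*
+	 * Three cases: neither bio has a DUN - allow the merge; only one of
+	 * them has a DUN - never merge; both have DUNs - merge only if bio2
+	 * starts exactly where bio1 ends, i.e. the DUNs are contiguous.
+	 */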
+	if (!bio_dun(bio1) && !bio_dun(bio2))
+		return true;
+	else if (!bio_dun(bio1) || !bio_dun(bio2))
+		return false;
+
+	return bio_end_dun(bio1) == bio_dun(bio2);
+}
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
new file mode 100644
index 0000000..551d529
--- /dev/null
+++ b/security/pfe/pfk_f2fs.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_F2FS_H_
+#define _PFK_F2FS_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_f2fs_type(const struct inode *inode);
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+		const struct inode *inode,
+		struct pfk_key_info *key_info,
+		enum ice_cryto_algo_mode *algo,
+		bool *is_pfe);
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_f2fs_init(void);
+
+void pfk_f2fs_deinit(void);
+
+#endif /* _PFK_F2FS_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..16ed516
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+
+/**********************************/
+/** global definitions		 **/
+/**********************************/
+
+#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+
+#define TZ_ES_SET_ICE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
+
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+		TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+			TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+
+#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+		TZ_SYSCALL_PARAM_TYPE_VAL, \
+		TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+		TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1( \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+static uint8_t ice_key[ICE_KEY_SIZE];
+static uint8_t ice_salt[ICE_SALT_SIZE];
+
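+/**
+ * qti_pfk_ice_set_key() - load a key/salt pair into an ICE key slot via an
+ * SCM call to TZ.
+ * @index: ICE key slot to program (MIN_ICE_KEY_INDEX..MAX_ICE_KEY_INDEX)
+ * @key: pointer to the key (ICE_KEY_SIZE bytes are consumed)
+ * @salt: pointer to the salt (ICE_SALT_SIZE bytes are consumed)
+ * @storage_type: storage device type string, e.g. "ufs" or "sdcc"
+ *
+ * Return 0 on success, error otherwise. On a set-key failure (other than
+ * -EBUSY) the slot is invalidated to keep the ICE in a consistent state.
+ */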
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret, ret1;
+	char *tzbuf_key = (char *)ice_key;
+	char *tzbuf_salt = (char *)ice_salt;
+	char *s_type = storage_type;
+
+	uint32_t smc_id = 0;
+	u32 tzbuflen_key = sizeof(ice_key);
+	u32 tzbuflen_salt = sizeof(ice_salt);
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+	if (!key || !salt) {
+		pr_err("%s Invalid key/salt\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!tzbuf_key || !tzbuf_salt) {
+		pr_err("%s No Memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (s_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(tzbuf_key, 0, tzbuflen_key);
+	memset(tzbuf_salt, 0, tzbuflen_salt);
+
+	memcpy(ice_key, key, tzbuflen_key);
+	memcpy(ice_salt, salt, tzbuflen_salt);
+
+	dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
+	dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+
+	smc_id = TZ_ES_SET_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+	desc.args[1] = virt_to_phys(tzbuf_key);
+	desc.args[2] = tzbuflen_key;
+	desc.args[3] = virt_to_phys(tzbuf_salt);
+	desc.args[4] = tzbuflen_salt;
+
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = scm_call2(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Set Key Error: %d\n", __func__, ret);
+		if (ret == -EBUSY) {
+			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+				pr_err("%s: clock disable failed\n", __func__);
+			goto out;
+		}
+		/* Try to invalidate the key to keep ICE in proper state */
+		smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+		desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+		desc.args[0] = index;
+		ret1 = scm_call2(smc_id, &desc);
+		if (ret1)
+			pr_err("%s: Invalidate Key Error: %d\n", __func__,
+					ret1);
+		goto out;
+	}
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, false);
+
+out:
+	return ret;
+}
+
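+/**
+ * qti_pfk_ice_invalidate_key() - invalidate the key stored in an ICE key slot
+ * via an SCM call to TZ.
+ * @index: ICE key slot to invalidate (MIN_ICE_KEY_INDEX..MAX_ICE_KEY_INDEX)
+ * @storage_type: storage device type string, e.g. "ufs" or "sdcc"
+ *
+ * Return 0 on success, error otherwise.
+ */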
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	uint32_t smc_id = 0;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	if (storage_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+
+	ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+		return ret;
+	}
+
+	ret = scm_call2(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+		if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+			pr_err("%s: could not disable clocks\n", __func__);
+	} else {
+		ret = qcom_ice_setup_ice_hw((const char *)storage_type, false);
+	}
+
+	return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..31772e7
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE keys configuration through scm calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..3214327
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+	const unsigned char *key;
+	const unsigned char *salt;
+	size_t key_size;
+	size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..eecc026
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear and based on last usage timestamp, i.e.
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+/* TODO replace by some constant from ice.h */
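+/*
+ * 32 is the total number of ICE key slots; the first PFK_KC_STARTING_INDEX
+ * slots are reserved for FDE (see pfk_ice.c) and are excluded from the cache.
+ */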
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE:		   entry is free
+ * @ACTIVE_ICE_PRELOAD:    entry is actively used by ICE engine
+ *			   and cannot be used by others. SCM call
+ *			   to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED:     entry is actively used by ICE engine and
+ *			   cannot be used by others. SCM call to load the
+ *			   key to ICE was successfully executed and key is
+ *			   now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+ *			   and cannot be used by others until invalidation
+ *			   is complete
+ * @INACTIVE:		   entry's key is already loaded, but is not
+ *			   currently being used. It can be re-used for
+ *			   optimization and to avoid SCM call cost or
+ *			   it can be taken by another key if there are
+ *			   no FREE entries
+ * @SCM_ERROR:		   error occurred while scm call was performed to
+ *			   load the key to ICE
+ */
+enum pfk_kc_entry_state {
+	FREE,
+	ACTIVE_ICE_PRELOAD,
+	ACTIVE_ICE_LOADED,
+	INACTIVE_INVALIDATING,
+	INACTIVE,
+	SCM_ERROR
+};
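+
+/*
+ * Typical entry lifecycle (derived from the code below):
+ *   FREE/INACTIVE -> ACTIVE_ICE_PRELOAD -> ACTIVE_ICE_LOADED -> INACTIVE
+ *   INACTIVE -> INACTIVE_INVALIDATING -> FREE   (on key removal / clear)
+ *   ACTIVE_ICE_PRELOAD -> SCM_ERROR -> FREE     (on a failed SCM call)
+ */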
+
+struct kc_entry {
+	 unsigned char key[PFK_MAX_KEY_SIZE];
+	 size_t key_size;
+
+	 unsigned char salt[PFK_MAX_SALT_SIZE];
+	 size_t salt_size;
+
+	 u64 time_stamp;
+	 u32 key_index;
+
+	 struct task_struct *thread_pending;
+
+	 enum pfk_kc_entry_state state;
+
+	 /* ref count for the number of requests in the HW queue for this key */
+	 int loaded_ref_cnt;
+	 int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+	return kc_ready;
+}
+
+static inline void kc_spin_lock(void)
+{
+	spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+	spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if it is, false otherwise or if the entry is invalid
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+	if (!entry)
+		return false;
+
+	return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+	int res = 0;
+
+	while (!kc_entry_is_available(entry)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			res = -ERESTARTSYS;
+			break;
+		}
+		/* assuming only one thread can try to invalidate
+		 * the same entry
+		 */
+		entry->thread_pending = current;
+		kc_spin_unlock();
+		schedule();
+		kc_spin_lock();
+	}
+	set_current_state(TASK_RUNNING);
+
+	return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ *			           INACTIVE_INVALIDATING
+ *				   If entry is in use, waits till
+ *				   it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+	int res;
+
+	res = kc_entry_wait_till_available(entry);
+	if (res)
+		return res;
+
+	entry->state = INACTIVE_INVALIDATING;
+
+	return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves the entry to state FREE once
+ *				    invalidation is complete
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	if (entry->state != INACTIVE_INVALIDATING)
+		return;
+
+	entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry whose timestamp is the minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+		struct kc_entry *b)
+{
+	if (!a)
+		return b;
+
+	if (time_before64(b->time_stamp, a->time_stamp))
+		return b;
+
+	return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+	return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start the search from; if an entry is found, it
+ * is updated with the index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size,
+	int *starting_index)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (salt != NULL) {
+			if (entry->salt_size != salt_size)
+				continue;
+
+			if (memcmp(entry->salt, salt, salt_size) != 0)
+				continue;
+		}
+
+		if (entry->key_size != key_size)
+			continue;
+
+		if (memcmp(entry->key, key, key_size) == 0) {
+			*starting_index = i;
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	int index = 0;
+
+	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, will return NULL
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+	struct kc_entry *curr_min_entry = NULL;
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (entry->state == FREE)
+			return entry;
+
+		if (entry->state == INACTIVE)
+			curr_min_entry = kc_min_entry(curr_min_entry, entry);
+	}
+
+	return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	memset(entry->key, 0, entry->key_size);
+	memset(entry->salt, 0, entry->salt_size);
+
+	entry->key_size = 0;
+	entry->salt_size = 0;
+
+	entry->time_stamp = 0;
+	entry->scm_error = 0;
+
+	entry->state = FREE;
+
+	entry->loaded_ref_cnt = 0;
+	entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ *			loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ *
+ * The previous key is securely released and wiped, the new one is loaded
+ * to ICE.
+ * Should be invoked under spinlock
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size)
+{
+	int ret;
+
+	kc_clear_entry(entry);
+
+	memcpy(entry->key, key, key_size);
+	entry->key_size = key_size;
+
+	memcpy(entry->salt, salt, salt_size);
+	entry->salt_size = salt_size;
+
+	/* Mark entry as no longer free before releasing the lock */
+	entry->state = ACTIVE_ICE_PRELOAD;
+	kc_spin_unlock();
+
+	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+			entry->salt, s_type);
+
+	kc_spin_lock();
+	return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+	int i = 0;
+	struct kc_entry *entry = NULL;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		entry->key_index = PFK_KC_STARTING_INDEX + i;
+	}
+	kc_ready = true;
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+	int res = pfk_kc_clear();
+	kc_ready = false;
+
+	return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: whether scm calls are allowed in the caller context
+ *
+ * If the key is present in the cache, the key_index will be retrieved from
+ * the cache. If it is not present, the oldest entry from the kc table will
+ * be evicted, and the key will be loaded to the ICE via QSEE at the index of
+ * the evicted entry and stored in the cache.
+ * The entry that is going to be used is marked as being in use; it will be
+ * marked as not being used when the ICE finishes using it and
+ * pfk_kc_load_key_end() is invoked.
+ * As QSEE calls can only be made from a non-atomic context, @async set to
+ * 'false' specifies that it is ok to make the calls in the current context.
+ * Otherwise, when @async is set, the caller should retry the call from a
+ * different context, and an -EAGAIN error will be returned.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async)
+{
+	int ret = 0;
+	struct kc_entry *entry = NULL;
+	bool entry_exists = false;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key || !salt || !key_index) {
+		pr_err("%s key/salt/key_index NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (key_size != PFK_KC_KEY_SIZE) {
+		pr_err("unsupported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (salt_size != PFK_KC_SALT_SIZE) {
+		pr_err("unsupported salt size %zu\n", salt_size);
+		return -EINVAL;
+	}
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		if (async) {
+			pr_debug("%s task will populate entry\n", __func__);
+			kc_spin_unlock();
+			return -EAGAIN;
+		}
+
+		entry = kc_find_oldest_entry_non_locked();
+		if (!entry) {
+			/* could not find a single non locked entry,
+			 * return EBUSY to upper layers so that the
+			 * request will be rescheduled
+			 */
+			kc_spin_unlock();
+			return -EBUSY;
+		}
+	} else {
+		entry_exists = true;
+	}
+
+	pr_debug("entry with index %d is in state %d\n",
+		entry->key_index, entry->state);
+
+	switch (entry->state) {
+	case (INACTIVE):
+		if (entry_exists) {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+			break;
+		}
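+		/* else fall through: reuse this inactive entry for the new key */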
+	case (FREE):
+		ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+		if (ret) {
+			entry->state = SCM_ERROR;
+			entry->scm_error = ret;
+			pr_err("%s: key load error (%d)\n", __func__, ret);
+		} else {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			/*
+			 * In case of UFS only increase ref cnt for async calls,
+			 * sync calls from within work thread do not pass
+			 * requests further to HW
+			 */
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+		}
+		break;
+	case (ACTIVE_ICE_PRELOAD):
+	case (INACTIVE_INVALIDATING):
+		ret = -EAGAIN;
+		break;
+	case (ACTIVE_ICE_LOADED):
+		kc_update_timestamp(entry);
+
+		if (!strcmp(s_type, (char *)PFK_UFS)) {
+			if (async)
+				entry->loaded_ref_cnt++;
+		} else {
+			entry->loaded_ref_cnt++;
+		}
+		break;
+	case (SCM_ERROR):
+		ret = entry->scm_error;
+		kc_clear_entry(entry);
+		entry->state = FREE;
+		break;
+	default:
+		pr_err("invalid state %d for entry with key index %d\n",
+			entry->state, entry->key_index);
+		ret = -EINVAL;
+	}
+
+	*key_index = entry->key_index;
+	kc_spin_unlock();
+
+	return ret;
+}
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ *						   by pfk_kc_load_key_start
+ *						   by marking the entry as not
+ *						   being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	struct task_struct *tmp_pending = NULL;
+	int ref_cnt = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	if (!key || !salt)
+		return;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		kc_spin_unlock();
+		pr_err("internal error, there should be an entry to unlock\n");
+
+		return;
+	}
+	ref_cnt = --entry->loaded_ref_cnt;
+
+	if (ref_cnt < 0)
+		pr_err("internal error, ref count should never be negative\n");
+
+	if (!ref_cnt) {
+		entry->state = INACTIVE;
+		/*
+		 * wake-up invalidation if it's waiting
+		 * for the entry to be released
+		 */
+		if (entry->thread_pending) {
+			tmp_pending = entry->thread_pending;
+			entry->thread_pending = NULL;
+
+			kc_spin_unlock();
+			wake_up_process(tmp_pending);
+			return;
+		}
+	}
+
+	kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from the cache and from the
+ * ICE engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (!salt)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return -EINVAL;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		pr_debug("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+	kc_clear_entry(entry);
+
+	kc_spin_unlock();
+
+	qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+	kc_spin_lock();
+	kc_entry_finish_invalidating(entry);
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from the cache and from the ICE engine
+ * when no salt is available. Only the key part is searched; if there are
+ * several matching entries, all of them will be removed.
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+	struct kc_entry *entry = NULL;
+	int index = 0;
+	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+	int temp_indexes_size = 0;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	memset(temp_indexes, -1, sizeof(temp_indexes));
+
+	kc_spin_lock();
+
+	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+	if (!entry) {
+		pr_err("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+
+	temp_indexes[temp_indexes_size++] = index;
+	kc_clear_entry(entry);
+
+	/* let's clean additional entries with the same key if there are any */
+	do {
+		index++;
+		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+		if (!entry)
+			break;
+
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+
+		temp_indexes[temp_indexes_size++] = index;
+
+		kc_clear_entry(entry);
+
+
+	} while (true);
+
+	kc_spin_unlock();
+
+	temp_indexes_size--;
+	for (i = temp_indexes_size; i >= 0 ; i--)
+		qti_pfk_ice_invalidate_key(
+			kc_entry_at_index(temp_indexes[i])->key_index,
+					s_type);
+
+	/* fall through */
+	res = 0;
+
+out:
+	kc_spin_lock();
+	for (i = temp_indexes_size; i >= 0 ; i--)
+		kc_entry_finish_invalidating(
+				kc_entry_at_index(temp_indexes[i]));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+					s_type);
+
+	/* fall through */
+	res = 0;
+out:
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		kc_entry_finish_invalidating(kc_entry_at_index(i));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the table on storage core reset.
+ * The assumption is that at this point we don't have any pending transactions.
+ * Also, there is no need to clear keys from the ICE, since its key
+ * configuration is lost on core reset anyway.
+ *
+ */
+void pfk_kc_clear_on_reset(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+}
+
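+/**
+ * pfk_kc_find_storage_type() - detect the boot storage type from the kernel
+ * command line.
+ * @device: output pointer; set to "ufs" when the androidboot.bootdevice value
+ * contains "ufs", otherwise left unchanged (the default is "sdcc")
+ *
+ * Return 0 if androidboot.bootdevice was found on the command line,
+ * -EINVAL otherwise.
+ */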
+static int pfk_kc_find_storage_type(char **device)
+{
+	char boot[20] = {'\0'};
+	char *match = (char *)strnstr(saved_command_line,
+				"androidboot.bootdevice=",
+				strlen(saved_command_line));
+	if (match) {
+		memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+			sizeof(boot) - 1);
+		if (strnstr(boot, PFK_UFS, strlen(boot)))
+			*device = PFK_UFS;
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int __init pfk_kc_pre_init(void)
+{
+	return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+	s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..6adeee2
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index e43c50c..35c8dce 100644
--- a/security/security.c
+++ b/security/security.c
@@ -525,6 +525,14 @@
 }
 EXPORT_SYMBOL_GPL(security_inode_create);
 
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+			       umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry)
 {
@@ -1700,6 +1708,8 @@
 	.inode_init_security =
 		LIST_HEAD_INIT(security_hook_heads.inode_init_security),
 	.inode_create =	LIST_HEAD_INIT(security_hook_heads.inode_create),
+	.inode_post_create =
+		LIST_HEAD_INIT(security_hook_heads.inode_post_create),
 	.inode_link =	LIST_HEAD_INIT(security_hook_heads.inode_link),
 	.inode_unlink =	LIST_HEAD_INIT(security_hook_heads.inode_unlink),
 	.inode_symlink =
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 52f3c55..84d9a2e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,26 +348,27 @@
 	struct avc_xperms_decision_node *xpd_node;
 	struct extended_perms_decision *xpd;
 
-	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+			GFP_NOWAIT | __GFP_NOWARN);
 	if (!xpd_node)
 		return NULL;
 
 	xpd = &xpd_node->xpd;
 	if (which & XPERMS_ALLOWED) {
 		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->allowed)
 			goto error;
 	}
 	if (which & XPERMS_AUDITALLOW) {
 		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->auditallow)
 			goto error;
 	}
 	if (which & XPERMS_DONTAUDIT) {
 		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->dontaudit)
 			goto error;
 	}
@@ -395,7 +396,8 @@
 {
 	struct avc_xperms_node *xp_node;
 
-	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+			GFP_NOWAIT | __GFP_NOWARN);
 	if (!xp_node)
 		return xp_node;
 	INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -548,7 +550,7 @@
 {
 	struct avc_node *node;
 
-	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
 	if (!node)
 		goto out;
 
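The __GFP_NOWARN additions above follow a common pattern for opportunistic cache allocations made in atomic context: a failure only means a slower uncached lookup later, so there is no point in logging an allocation-failure backtrace. A minimal sketch of the same pattern, with my_cachep and struct my_node as illustrative names:

#include <linux/slab.h>

struct my_node { u32 key; u32 value; };

static struct kmem_cache *my_cachep;	/* created elsewhere with kmem_cache_create() */

static struct my_node *my_node_alloc(void)
{
	/*
	 * GFP_NOWAIT: we may be called under a spinlock, so never sleep.
	 * __GFP_NOWARN: failure is tolerated, keep dmesg quiet under pressure.
	 */
	return kmem_cache_zalloc(my_cachep, GFP_NOWAIT | __GFP_NOWARN);
}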
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8e12ffe..5f3fa60 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1092,10 +1092,9 @@
 		goto out_err;
 
 	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
-	if (!opts->mnt_opts_flags) {
-		kfree(opts->mnt_opts);
+	if (!opts->mnt_opts_flags)
 		goto out_err;
-	}
+
 
 	if (fscontext) {
 		opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1118,6 +1117,7 @@
 	return 0;
 
 out_err:
+	security_free_mnt_opts(opts);
 	kfree(context);
 	kfree(defcontext);
 	kfree(fscontext);
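The hooks.c change above illustrates a general cleanup rule: once every failure path funnels through one label, that label must be able to free a partially built structure. A compilable userspace sketch of the same shape (parse_opts and opts_free are illustrative names, not kernel code):

#include <stdlib.h>

struct opts { char **vals; int *flags; };

static void opts_free(struct opts *o)
{
	free(o->vals);
	free(o->flags);
	o->vals = NULL;
	o->flags = NULL;
}

static int parse_opts(struct opts *o, int n)
{
	o->vals = calloc(n, sizeof(*o->vals));
	if (!o->vals)
		goto out_err;
	o->flags = calloc(n, sizeof(*o->flags));
	if (!o->flags)
		goto out_err;
	return 0;

out_err:
	opts_free(o);		/* safe on partially built state */
	return -1;
}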
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 43535cd..60cdcf4 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,8 +25,9 @@
 #include <linux/in.h>
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+//#include "flask.h"
+//#include "avc.h"
+#include "security.h"
 
 struct task_security_struct {
 	u32 osid;		/* SID prior to last execve */
@@ -52,6 +53,8 @@
 	u32 sid;		/* SID of this object */
 	u16 sclass;		/* security class of this object */
 	unsigned char initialized;	/* initialization flag */
+	u32 tag;		/* Per-File-Encryption tag */
+	void *pfk_data; /* Per-File-Key data from ecryptfs */
 	struct mutex lock;
 };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 308a286..b8e98c1 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -12,7 +12,6 @@
 #include <linux/dcache.h>
 #include <linux/magic.h>
 #include <linux/types.h>
-#include "flask.h"
 
 #define SECSID_NULL			0x00000000 /* unspecified SID */
 #define SECSID_WILD			0xffffffff /* wildcard SID */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 9b517a4..a6b0970 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1434,7 +1434,7 @@
 				      scontext_len, &context, def_sid);
 	if (rc == -EINVAL && force) {
 		context.str = str;
-		context.len = scontext_len;
+		context.len = strlen(str) + 1;
 		str = NULL;
 	} else if (rc)
 		goto out_unlock;
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 1fa7076..84ee29c 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -400,8 +400,7 @@
 	if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
 	    copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
 		goto error;
-	if (get_user(data->owner, &data32->owner) ||
-	    get_user(data->type, &data32->type))
+	if (get_user(data->owner, &data32->owner))
 		goto error;
 	switch (data->type) {
 	case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index e1512ae..0c81e26 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -426,6 +426,8 @@
 		return -ENOTTY;
 	if (substream->stream != dir)
 		return -EINVAL;
+	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
+		return -EBADFD;
 
 	if ((ch = substream->runtime->channels) > 128)
 		return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 019f60b..2df7e6b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2765,6 +2765,7 @@
 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
 	sync_ptr.s.status.tstamp = status->tstamp;
 	sync_ptr.s.status.suspended_state = status->suspended_state;
+	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
 	snd_pcm_stream_unlock_irq(substream);
 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
 		return -EFAULT;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 5143801..180261d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -976,9 +976,9 @@
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
 	unsigned long appl_ptr;
 
-	spin_lock_irqsave(&runtime->lock, flags);
 	if (userbuf)
 		mutex_lock(&runtime->realloc_mutex);
+	spin_lock_irqsave(&runtime->lock, flags);
 	while (count > 0 && runtime->avail) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
 		if (count1 > count)
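The rawmidi reordering above enforces the basic lock-ordering rule that a sleeping lock (the realloc mutex) must be acquired before entering a spinlocked, IRQs-off section, never inside it. A small sketch of the corrected shape, with struct my_stream as an illustrative stand-in for the rawmidi runtime:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_stream {
	struct mutex realloc_mutex;
	spinlock_t lock;
};

static void my_stream_advance(struct my_stream *s, bool userbuf)
{
	unsigned long flags;

	if (userbuf)
		mutex_lock(&s->realloc_mutex);	/* may sleep: take it first */
	spin_lock_irqsave(&s->lock, flags);	/* atomic context from here on */
	/* ... update ring-buffer pointers ... */
	spin_unlock_irqrestore(&s->lock, flags);
	if (userbuf)
		mutex_unlock(&s->realloc_mutex);
}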
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c390886..86ca584 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
 #include <sound/seq_oss_legacy.h>
 #include "seq_oss_readq.h"
 #include "seq_oss_writeq.h"
+#include <linux/nospec.h>
 
 
 /*
@@ -287,10 +288,10 @@
 {
 	struct seq_oss_synthinfo *info;
 
-	if (!snd_seq_oss_synth_is_valid(dp, dev))
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	info = &dp->synths[dev];
 	switch (info->arg.event_passing) {
 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@
 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
 		}
 
+		ch = array_index_nospec(ch, info->nr_voices);
 		if (note == 255 && info->ch[ch].note >= 0) {
 			/* volume control */
 			int type;
@@ -347,10 +349,10 @@
 {
 	struct seq_oss_synthinfo *info;
 
-	if (!snd_seq_oss_synth_is_valid(dp, dev))
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	info = &dp->synths[dev];
 	switch (info->arg.event_passing) {
 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@
 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
 		}
 
+		ch = array_index_nospec(ch, info->nr_voices);
 		if (info->ch[ch].note >= 0) {
 			note = info->ch[ch].note;
 			info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@
 static int
 set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	if (!snd_seq_oss_synth_info(dp, dev))
 		return -ENXIO;
 	
 	ev->type = type;
@@ -399,7 +402,7 @@
 static int
 set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	if (!snd_seq_oss_synth_info(dp, dev))
 		return -ENXIO;
 	
 	ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b213..9debd1b 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
 #include "../seq_lock.h"
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 
 /*
@@ -315,6 +316,7 @@
 {
 	if (dev < 0 || dev >= dp->max_mididev)
 		return NULL;
+	dev = array_index_nospec(dev, dp->max_mididev);
 	return get_mdev(dev);
 }
 
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index cd0e0eb..278ebb9 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 /*
  * constants
@@ -339,17 +340,13 @@
 	dp->max_synthdev = 0;
 }
 
-/*
- * check if the specified device is MIDI mapped device
- */
-static int
-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
+static struct seq_oss_synthinfo *
+get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
 {
 	if (dev < 0 || dev >= dp->max_synthdev)
-		return 0;
-	if (dp->synths[dev].is_midi)
-		return 1;
-	return 0;
+		return NULL;
+	dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
+	return &dp->synths[dev];
 }
 
 /*
@@ -359,14 +356,20 @@
 get_synthdev(struct seq_oss_devinfo *dp, int dev)
 {
 	struct seq_oss_synth *rec;
-	if (dev < 0 || dev >= dp->max_synthdev)
+	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
+
+	if (!info)
 		return NULL;
-	if (! dp->synths[dev].opened)
+	if (!info->opened)
 		return NULL;
-	if (dp->synths[dev].is_midi)
-		return &midi_synth_dev;
-	if ((rec = get_sdev(dev)) == NULL)
-		return NULL;
+	if (info->is_midi) {
+		rec = &midi_synth_dev;
+		snd_use_lock_use(&rec->use_lock);
+	} else {
+		rec = get_sdev(dev);
+		if (!rec)
+			return NULL;
+	}
 	if (! rec->opened) {
 		snd_use_lock_free(&rec->use_lock);
 		return NULL;
@@ -402,10 +405,8 @@
 	struct seq_oss_synth *rec;
 	struct seq_oss_synthinfo *info;
 
-	if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
-		return;
-	info = &dp->synths[dev];
-	if (! info->opened)
+	info = get_synthinfo_nospec(dp, dev);
+	if (!info || !info->opened)
 		return;
 	if (info->sysex)
 		info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@
 			    const char __user *buf, int p, int c)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info;
 	int rc;
 
-	if (dev < 0 || dev >= dp->max_synthdev)
+	info = get_synthinfo_nospec(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	if (is_midi_dev(dp, dev))
+	if (info->is_midi)
 		return 0;
 	if ((rec = get_synthdev(dp, dev)) == NULL)
 		return -ENXIO;
@@ -467,24 +470,25 @@
 	if (rec->oper.load_patch == NULL)
 		rc = -ENXIO;
 	else
-		rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
+		rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
 	snd_use_lock_free(&rec->use_lock);
 	return rc;
 }
 
 /*
- * check if the device is valid synth device
+ * check if the device is valid synth device and return the synth info
  */
-int
-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
+struct seq_oss_synthinfo *
+snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
 {
 	struct seq_oss_synth *rec;
+
 	rec = get_synthdev(dp, dev);
 	if (rec) {
 		snd_use_lock_free(&rec->use_lock);
-		return 1;
+		return get_synthinfo_nospec(dp, dev);
 	}
-	return 0;
+	return NULL;
 }
 
 
@@ -499,16 +503,18 @@
 	int i, send;
 	unsigned char *dest;
 	struct seq_oss_synth_sysex *sysex;
+	struct seq_oss_synthinfo *info;
 
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info)
 		return -ENXIO;
 
-	sysex = dp->synths[dev].sysex;
+	sysex = info->sysex;
 	if (sysex == NULL) {
 		sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
 		if (sysex == NULL)
 			return -ENOMEM;
-		dp->synths[dev].sysex = sysex;
+		info->sysex = sysex;
 	}
 
 	send = 0;
@@ -553,10 +559,12 @@
 int
 snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev))
+	struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
+
+	if (!info)
 		return -EINVAL;
-	snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
-			      dp->synths[dev].arg.addr.port);
+	snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
+			      info->arg.addr.port);
 	return 0;
 }
 
@@ -568,16 +576,18 @@
 snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info;
 	int rc;
 
-	if (is_midi_dev(dp, dev))
+	info = get_synthinfo_nospec(dp, dev);
+	if (!info || info->is_midi)
 		return -ENXIO;
 	if ((rec = get_synthdev(dp, dev)) == NULL)
 		return -ENXIO;
 	if (rec->oper.ioctl == NULL)
 		rc = -ENXIO;
 	else
-		rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
+		rc = rec->oper.ioctl(&info->arg, cmd, addr);
 	snd_use_lock_free(&rec->use_lock);
 	return rc;
 }
@@ -589,7 +599,10 @@
 int
 snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
 {
-	if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
+	struct seq_oss_synthinfo *info;
+
+	info = snd_seq_oss_synth_info(dp, dev);
+	if (!info || info->is_midi)
 		return -ENXIO;
 	ev->type = SNDRV_SEQ_EVENT_OSS;
 	memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f..a63f9e2 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@
 void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
 int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
 				 const char __user *buf, int p, int c);
-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
+struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
+						 int dev);
 int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
 			    struct snd_seq_event *ev);
 int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
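The seq_oss conversions above (and the opl3, asihpi, hda_hwdep, hdspm and rme9652 hunks that follow) all apply the same Spectre-v1 hardening recipe: validate the untrusted index first, then clamp it with array_index_nospec() before it indexes a kernel array, so the CPU cannot speculatively read out of bounds even when the bounds check is mispredicted. A minimal sketch of the pattern (struct item, lookup_item and the parameters are illustrative):

#include <linux/nospec.h>

struct item { int val; };

static struct item *lookup_item(struct item *table, unsigned int nr_items,
				int idx)
{
	if (idx < 0 || idx >= nr_items)
		return NULL;
	/* clamp idx under speculation as well as architecturally */
	idx = array_index_nospec(idx, nr_items);
	return &table[idx];
}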
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 2007649..8bdc4c9 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -174,12 +174,12 @@
 			}
 			return;
 		}
+		spin_lock_irqsave(&substream->runtime->lock, flags);
 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
-				return;
+				goto out;
 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
 		}
-		spin_lock_irqsave(&substream->runtime->lock, flags);
 		while (1) {
 			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
 			if (count <= 0)
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 7622551..d3b6260 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -547,7 +547,7 @@
 	else
 		timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
 	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
-			  SNDRV_TIMER_EVENT_CONTINUE);
+			  SNDRV_TIMER_EVENT_PAUSE);
  unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
 	return result;
@@ -569,7 +569,7 @@
 		list_del_init(&timeri->ack_list);
 		list_del_init(&timeri->active_list);
 		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
-				  SNDRV_TIMER_EVENT_CONTINUE);
+				  SNDRV_TIMER_EVENT_PAUSE);
 		spin_unlock(&timeri->timer->lock);
 	}
 	spin_unlock_irqrestore(&slave_active_lock, flags);
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f..7c6ef87 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -68,10 +68,13 @@
 		return -ENOMEM;
 	uctl->id = slave->slave.id;
 	err = slave->slave.get(&slave->slave, uctl);
+	if (err < 0)
+		goto error;
 	for (ch = 0; ch < slave->info.count; ch++)
 		slave->vals[ch] = uctl->value.integer.value[ch];
+ error:
 	kfree(uctl);
-	return 0;
+	return err < 0 ? err : 0;
 }
 
 /* get the slave ctl info and save the initial values */
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index dc91002..847f703 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -296,6 +296,8 @@
 		cable->pause |= stream;
 		loopback_timer_stop(dpcm);
 		spin_unlock(&cable->lock);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			loopback_active_notify(dpcm);
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 	case SNDRV_PCM_TRIGGER_RESUME:
@@ -304,6 +306,8 @@
 		cable->pause &= ~stream;
 		loopback_timer_start(dpcm);
 		spin_unlock(&cable->lock);
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			loopback_active_notify(dpcm);
 		break;
 	default:
 		return -EINVAL;
@@ -828,9 +832,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].rate_shift;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
@@ -862,9 +868,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].notify;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
@@ -876,12 +884,14 @@
 	int change = 0;
 
 	val = ucontrol->value.integer.value[0] ? 1 : 0;
+	mutex_lock(&loopback->cable_lock);
 	if (val != loopback->setup[kcontrol->id.subdevice]
 				[kcontrol->id.device].notify) {
 		loopback->setup[kcontrol->id.subdevice]
 			[kcontrol->id.device].notify = val;
 		change = 1;
 	}
+	mutex_unlock(&loopback->cable_lock);
 	return change;
 }
 
@@ -889,13 +899,18 @@
 			       struct snd_ctl_elem_value *ucontrol)
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
-	struct loopback_cable *cable = loopback->cables
-			[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+	struct loopback_cable *cable;
+
 	unsigned int val = 0;
 
-	if (cable != NULL)
-		val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
-									1 : 0;
+	mutex_lock(&loopback->cable_lock);
+	cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+	if (cable != NULL) {
+		unsigned int running = cable->running ^ cable->pause;
+
+		val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
+	}
+	mutex_unlock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] = val;
 	return 0;
 }
@@ -938,9 +953,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].rate;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
@@ -960,9 +977,11 @@
 {
 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
 	
+	mutex_lock(&loopback->cable_lock);
 	ucontrol->value.integer.value[0] =
 		loopback->setup[kcontrol->id.subdevice]
 			       [kcontrol->id.device].channels;
+	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
 
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a3..42920a2 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
 
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 #include <sound/opl3.h>
 #include <sound/asound_fm.h>
 
@@ -448,7 +449,7 @@
 {
 	unsigned short reg_side;
 	unsigned char op_offset;
-	unsigned char voice_offset;
+	unsigned char voice_offset, voice_op;
 
 	unsigned short opl3_reg;
 	unsigned char reg_val;
@@ -473,7 +474,9 @@
 		voice_offset = voice->voice - MAX_OPL2_VOICES;
 	}
 	/* Get register offset of operator */
-	op_offset = snd_opl3_regmap[voice_offset][voice->op];
+	voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
+	voice_op = array_index_nospec(voice->op, 4);
+	op_offset = snd_opl3_regmap[voice_offset][voice_op];
 
 	reg_val = 0x00;
 	/* Set amplitude modulation (tremolo) effect */
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index ec4db3a..257cfbf 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -425,7 +425,7 @@
 		err = init_stream(dice, AMDTP_IN_STREAM, i);
 		if (err < 0) {
 			for (; i >= 0; i--)
-				destroy_stream(dice, AMDTP_OUT_STREAM, i);
+				destroy_stream(dice, AMDTP_IN_STREAM, i);
 			goto end;
 		}
 	}
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 25e9f77..0d3d36f 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -14,7 +14,7 @@
 #define OUI_WEISS		0x001c6a
 #define OUI_LOUD		0x000ff2
 #define OUI_FOCUSRITE		0x00130e
-#define OUI_TCELECTRONIC	0x001486
+#define OUI_TCELECTRONIC	0x000166
 
 #define DICE_CATEGORY_ID	0x04
 #define WEISS_CATEGORY_ID	0x00
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb6171..a31a70d 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
 
 #include "hpi_internal.h"
 #include "hpimsginit.h"
+#include <linux/nospec.h>
 
 /* The actual message size for each object type */
 static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@
 {
 	u16 size;
 
-	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+		object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
 		size = msg_size[object];
-	else
+	} else {
 		size = sizeof(*phm);
+	}
 
 	memset(phm, 0, size);
 	phm->size = size;
@@ -66,10 +69,12 @@
 {
 	u16 size;
 
-	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+		object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
 		size = res_size[object];
-	else
+	} else {
 		size = sizeof(*phr);
+	}
 
 	memset(phr, 0, sizeof(*phr));
 	phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7e3aa50..3ef9af5 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
 #include <linux/stringify.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/nospec.h>
 
 #ifdef MODULE_FIRMWARE
 MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@
 		struct hpi_adapter *pa = NULL;
 
 		if (hm->h.adapter_index < ARRAY_SIZE(adapters))
-			pa = &adapters[hm->h.adapter_index];
+			pa = &adapters[array_index_nospec(hm->h.adapter_index,
+							  ARRAY_SIZE(adapters))];
 
 		if (!pa || !pa->adapter || !pa->adapter->type) {
 			hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7f3b5ed..f7a492c 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -88,7 +88,6 @@
 config SND_HDA_CODEC_REALTEK
 	tristate "Build Realtek HD-audio codec support"
 	select SND_HDA_GENERIC
-	select INPUT
 	help
 	  Say Y or M here to include Realtek HD-audio codec support in
 	  snd-hda-intel driver, such as ALC880.
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e..cc009a4 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
 #include <sound/core.h>
 #include "hda_codec.h"
 #include "hda_local.h"
@@ -51,7 +52,16 @@
 	
 	if (get_user(verb, &arg->verb))
 		return -EFAULT;
-	res = get_wcaps(codec, verb >> 24);
+	/* open-code get_wcaps(verb>>24) with nospec */
+	verb >>= 24;
+	if (verb < codec->core.start_nid ||
+	    verb >= codec->core.start_nid + codec->core.num_nodes) {
+		res = 0;
+	} else {
+		verb -= codec->core.start_nid;
+		verb = array_index_nospec(verb, codec->core.num_nodes);
+		res = codec->wcaps[verb];
+	}
 	if (put_user(res, &arg->res))
 		return -EFAULT;
 	return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7d3f88d..4e91120 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2061,6 +2061,8 @@
 	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
 	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+	SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
 	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
 	{}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e2230be..39cd35f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@
 		break;
 	case 0x10ec0225:
 	case 0x10ec0233:
+	case 0x10ec0235:
 	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
@@ -3494,6 +3495,7 @@
 	}
 }
 
+#if IS_REACHABLE(INPUT)
 static void gpio2_mic_hotkey_event(struct hda_codec *codec,
 				   struct hda_jack_callback *event)
 {
@@ -3626,6 +3628,10 @@
 		spec->kb_dev = NULL;
 	}
 }
+#else /* INPUT */
+#define alc280_fixup_hp_gpio2_mic_hotkey	NULL
+#define alc233_fixup_lenovo_line2_mic_hotkey	NULL
+#endif /* INPUT */
 
 static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
@@ -6359,6 +6365,7 @@
 	case 0x10ec0298:
 		spec->codec_variant = ALC269_TYPE_ALC298;
 		break;
+	case 0x10ec0235:
 	case 0x10ec0255:
 		spec->codec_variant = ALC269_TYPE_ALC255;
 		break;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 14bbf55..9899ef4 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
 #include <linux/pci.h>
 #include <linux/math64.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -5692,40 +5693,43 @@
 		struct snd_pcm_channel_info *info)
 {
 	struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+	unsigned int channel = info->channel;
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+		if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: output channel out of range (%d)\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		if (hdspm->channel_map_out[info->channel] < 0) {
+		channel = array_index_nospec(channel, hdspm->max_channels_out);
+		if (hdspm->channel_map_out[channel] < 0) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: output channel %d mapped out\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		info->offset = hdspm->channel_map_out[info->channel] *
+		info->offset = hdspm->channel_map_out[channel] *
 			HDSPM_CHANNEL_BUFFER_BYTES;
 	} else {
-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+		if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: input channel out of range (%d)\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		if (hdspm->channel_map_in[info->channel] < 0) {
+		channel = array_index_nospec(channel, hdspm->max_channels_in);
+		if (hdspm->channel_map_in[channel] < 0) {
 			dev_info(hdspm->card->dev,
 				 "snd_hdspm_channel_info: input channel %d mapped out\n",
-				 info->channel);
+				 channel);
 			return -EINVAL;
 		}
 
-		info->offset = hdspm->channel_map_in[info->channel] *
+		info->offset = hdspm->channel_map_in[channel] *
 			HDSPM_CHANNEL_BUFFER_BYTES;
 	}
 
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 55172c6..a76b1f1 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -2036,9 +2037,10 @@
 	if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
 		return -EINVAL;
 
-	if ((chn = rme9652->channel_map[info->channel]) < 0) {
+	chn = rme9652->channel_map[array_index_nospec(info->channel,
+						      RME9652_NCHANNELS)];
+	if (chn < 0)
 		return -EINVAL;
-	}
 
 	info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
 	info->first = 0;
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index 29a97d5..66d6c52 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -91,8 +91,8 @@
 	do {
 		mutex_lock(&ctx->lock);
 
-		tmo = 5;
-		while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+		tmo = 6;
+		while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
 			udelay(21);	/* wait an ac97 frame time */
 		if (!tmo) {
 			pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@
 		 * poll, Forrest, poll...
 		 */
 		tmo = 0x10000;
-		while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+		while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
 			asm volatile ("nop");
 		data = RD(ctx, AC97_CMDRESP);
 
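The au1x change is a classic timeout-counter fix: with the post-decrement, the counter ends at -1 when the device never responds, so the "if (!tmo)" timeout check can never fire; pre-decrementing leaves it at exactly 0 (and the initial value is bumped from 5 to 6 to keep the same number of polls). A compilable userspace illustration, where poll_ready() stands in for reading the AC97 status register:

#include <stdbool.h>
#include <stdio.h>

static bool poll_ready(void)
{
	return false;			/* simulate a device that never responds */
}

int main(void)
{
	int tmo = 6;

	while (!poll_ready() && --tmo)
		;			/* the driver would udelay() here */

	if (!tmo)
		printf("timeout detected\n");	/* reachable with --tmo, never with tmo-- */

	return 0;
}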
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 38bfd46..3ef1745 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -145,6 +145,13 @@
 
 	psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
 
+	/* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
+	if (ratio <= 256) {
+		pm = ratio;
+		fp = 1;
+		goto out;
+	}
+
 	/* Set the max fluctuation -- 0.1% of the max devisor */
 	savesub = (psr ? 1 : 8)  * 256 * maxfp / 1000;
 
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 85324e6..2d14e37 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -642,8 +642,12 @@
 		tmp |= mod_slave;
 		break;
 	case SND_SOC_DAIFMT_CBS_CFS:
-		/* Set default source clock in Master mode */
-		if (i2s->rclk_srcrate == 0)
+		/*
+		 * Set default source clock in Master mode, only when the
+		 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
+		 * clock configuration assigned in DT is not overwritten.
+		 */
+		if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
 			i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
 							0, SND_SOC_CLOCK_IN);
 		break;
@@ -858,6 +862,11 @@
 		return 0;
 
 	if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
+		struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
+
+		if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
+			i2s->rclk_srcrate = clk_get_rate(rclksrc);
+
 		psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
 		writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
 		dev_dbg(&i2s->pdev->dev,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b761761..2840a14 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1073,7 +1073,8 @@
 	}
 	if (!rtd->platform) {
 		dev_err(card->dev, "ASoC: platform %s not registered\n",
-			dai_link->platform_name);
+			((platform_name) ? platform_name :
+			  dai_link->platform_of_node->full_name));
 		goto _err_defer;
 	}
 
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 8a758c9..d6b48c7 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1180,6 +1180,9 @@
 			kfree(sm);
 			continue;
 		}
+
+		/* create any TLV data */
+		soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
 	}
 	return kc;
 
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ce11cc9..1c5d099 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -975,6 +975,14 @@
 		}
 		break;
 
+	case USB_ID(0x0d8c, 0x0103):
+		if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
+			usb_audio_info(chip,
+				 "set volume quirk for CM102-A+/102S+\n");
+			cval->min = -256;
+		}
+		break;
+
 	case USB_ID(0x0471, 0x0101):
 	case USB_ID(0x0471, 0x0104):
 	case USB_ID(0x0471, 0x0105):
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 9038b2e..eaa03ac 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -353,8 +353,11 @@
 /*
  * Dell usb dock with ALC4020 codec had a firmware problem where it got
  * screwed up when zero volume is passed; just skip it as a workaround
+ *
+ * Also the extension unit gives an access error, so skip it as well.
  */
 static const struct usbmix_name_map dell_alc4020_map[] = {
+	{ 4, NULL },	/* extension unit */
 	{ 16, NULL },
 	{ 19, NULL },
 	{ 0 }
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 45655b9..da9fc08 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1153,24 +1153,27 @@
 	return false;
 }
 
-/* Marantz/Denon USB DACs need a vendor cmd to switch
+/* ITF-USB DSD based DACs need a vendor cmd to switch
  * between PCM and native DSD mode
+ * (2 altsets version)
  */
-static bool is_marantz_denon_dac(unsigned int id)
+static bool is_itf_usb_dsd_2alts_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+	case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */
 		return true;
 	}
 	return false;
 }
 
-/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
- * between PCM/DOP and native DSD mode
+/* ITF-USB DSD based DACs need a vendor cmd to switch
+ * between PCM and native DSD mode
+ * (3 altsets version)
  */
-static bool is_teac_dsd_dac(unsigned int id)
+static bool is_itf_usb_dsd_3alts_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
@@ -1187,7 +1190,7 @@
 	struct usb_device *dev = subs->dev;
 	int err;
 
-	if (is_marantz_denon_dac(subs->stream->chip->usb_id)) {
+	if (is_itf_usb_dsd_2alts_dac(subs->stream->chip->usb_id)) {
 		/* First switch to alt set 0, otherwise the mode switch cmd
 		 * will not be accepted by the DAC
 		 */
@@ -1208,7 +1211,7 @@
 			break;
 		}
 		mdelay(20);
-	} else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
+	} else if (is_itf_usb_dsd_3alts_dac(subs->stream->chip->usb_id)) {
 		/* Vendor mode switch cmd is required. */
 		switch (fmt->altsetting) {
 		case 3: /* DSD mode (DSD_U32) requested */
@@ -1304,10 +1307,10 @@
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
-	/* Marantz/Denon devices with USB DAC functionality need a delay
+	/* ITF-USB DSD based DACs functionality need a delay
 	 * after each class compliant request
 	 */
-	if (is_marantz_denon_dac(chip->usb_id)
+	if (is_itf_usb_dsd_2alts_dac(chip->usb_id)
 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
@@ -1371,14 +1374,14 @@
 		break;
 	}
 
-	/* Denon/Marantz devices with USB DAC functionality */
-	if (is_marantz_denon_dac(chip->usb_id)) {
+	/* ITF-USB DSD based DACs (2 altsets version) */
+	if (is_itf_usb_dsd_2alts_dac(chip->usb_id)) {
 		if (fp->altsetting == 2)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
 
-	/* TEAC devices with USB DAC functionality */
-	if (is_teac_dsd_dac(chip->usb_id)) {
+	/* ITF-USB DSD based DACs (3 altsets version) */
+	if (is_itf_usb_dsd_3alts_dac(chip->usb_id)) {
 		if (fp->altsetting == 3)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index a2b3eb3..0b8cf31 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@
 #define KVM_VGIC_V2_DIST_SIZE		0x1000
 #define KVM_VGIC_V2_CPU_SIZE		0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST	2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+
+#define KVM_VGIC_V3_DIST_SIZE		SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
 
@@ -166,6 +173,12 @@
 #define KVM_REG_ARM_VFP_FPINST		0x1009
 #define KVM_REG_ARM_VFP_FPINST2		0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 3051f86..702de7a 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+					 KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35..0fb1326 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -596,6 +596,7 @@
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index a2ffec4..81c02e1 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
+#define KVM_SYNC_BPBC	(1UL << 10)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -217,7 +218,9 @@
 	};
 	__u8  reserved[512];	/* for future vector expansion */
 	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-	__u8 padding[52];	/* riccb needs to be 64byte aligned */
+	__u8 bpbc : 1;		/* bp mode */
+	__u8 reserved2 : 7;
+	__u8 padding1[51];	/* riccb needs to be 64byte aligned */
 	__u8 riccb[64];		/* runtime instrumentation controls block */
 };
 
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index f79669a..c278f27 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -12,7 +12,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	18	/* N 32-bit words worth of info */
+#define NCAPINTS	19	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -189,17 +189,32 @@
 
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
 
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
-#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_RETPOLINE	( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */
+
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 
 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
 #define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
 
+#define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW	( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD	( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS	( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
@@ -231,6 +246,7 @@
 #define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
 #define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT	( 9*32+25) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
 #define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
 #define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */
@@ -255,6 +271,10 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */
+#define X86_FEATURE_AMD_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
@@ -290,6 +310,16 @@
 #define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */
 #define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */
 
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
+
 /*
  * BUG word(s)
  */
@@ -314,4 +344,10 @@
 #define X86_BUG_NULL_SEG	X86_BUG(10) /* Nulling a selector preserves the base */
 #define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400	X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN	X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 85599ad..1f8cca4 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
 # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
 # define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
 # define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID		0
 #else
 # define DISABLE_VME		0
 # define DISABLE_K6_MTRR	0
 # define DISABLE_CYRIX_ARR	0
 # define DISABLE_CENTAUR_MCR	0
+# define DISABLE_PCID		(1<<(X86_FEATURE_PCID & 31))
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -43,7 +45,7 @@
 #define DISABLED_MASK1	0
 #define DISABLED_MASK2	0
 #define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4	0
+#define DISABLED_MASK4	(DISABLE_PCID)
 #define DISABLED_MASK5	0
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	0
@@ -57,6 +59,7 @@
 #define DISABLED_MASK15	0
 #define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE)
 #define DISABLED_MASK17	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index fac9a5c..6847d85 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -100,6 +100,7 @@
 #define REQUIRED_MASK15	0
 #define REQUIRED_MASK16	0
 #define REQUIRED_MASK17	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 653d1ba..0304600 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -13,6 +13,7 @@
  */
 
 #include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__ffz.h>
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
diff --git a/tools/include/asm-generic/bitops/__ffz.h b/tools/include/asm-generic/bitops/__ffz.h
new file mode 100644
index 0000000..6744bd4
--- /dev/null
+++ b/tools/include/asm-generic/bitops/__ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
index 31f5154..5538ecd 100644
--- a/tools/include/asm-generic/bitops/find.h
+++ b/tools/include/asm-generic/bitops/find.h
@@ -15,6 +15,21 @@
 		size, unsigned long offset);
 #endif
 
+#ifndef find_next_zero_bit
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset);
+#endif
+
 #ifndef find_first_bit
 
 /**
@@ -30,4 +45,17 @@
 
 #endif /* find_first_bit */
 
+#ifndef find_first_zero_bit
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+#endif
+
 #endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
index 4e3d3d1..9f21fc2 100644
--- a/tools/include/linux/atomic.h
+++ b/tools/include/linux/atomic.h
@@ -3,4 +3,10 @@
 
 #include <asm/atomic.h>
 
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define  atomic_cmpxchg_release         atomic_cmpxchg
+#endif /* atomic_cmpxchg_relaxed */
+
 #endif /* __TOOLS_LINUX_ATOMIC_H */
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 43c1c50..eef41d5 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -35,6 +35,32 @@
 	}
 }
 
+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+{
+	unsigned int nlongs = BITS_TO_LONGS(nbits);
+	if (!small_const_nbits(nbits)) {
+		unsigned int len = (nlongs - 1) * sizeof(unsigned long);
+		memset(dst, 0xff,  len);
+	}
+	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+	if (small_const_nbits(nbits))
+		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_bit(src, nbits) == nbits;
+}
+
+static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+{
+	if (small_const_nbits(nbits))
+		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_zero_bit(src, nbits) == nbits;
+}
+
 static inline int bitmap_weight(const unsigned long *src, int nbits)
 {
 	if (small_const_nbits(nbits))
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 49c929a..fc446343 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -39,6 +39,11 @@
 	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
+#define for_each_clear_bit(bit, addr, size) \
+	for ((bit) = find_first_zero_bit((addr), (size));       \
+	     (bit) < (size);                                    \
+	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
 /* same as for_each_set_bit() but use bit as value to start with */
 #define for_each_set_bit_from(bit, addr, size) \
 	for ((bit) = find_next_bit((addr), (size), (bit));	\
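A hypothetical use of the new for_each_clear_bit() helper, e.g. to find free slots in a small allocation mask; it assumes the tools find_bit implementation (find_first_zero_bit()/find_next_zero_bit()) is compiled in, as it is for the perf build, and the mask initializer is just an example:

#include <linux/bitops.h>
#include <stdio.h>

int main(void)
{
	unsigned long mask[1] = { 0x22 };	/* bits 1 and 5 are taken */
	unsigned long bit;

	for_each_clear_bit(bit, mask, 16)
		printf("slot %lu is free\n", bit);

	return 0;
}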
diff --git a/tools/include/linux/bug.h b/tools/include/linux/bug.h
new file mode 100644
index 0000000..8e4a4f4
--- /dev/null
+++ b/tools/include/linux/bug.h
@@ -0,0 +1,10 @@
+#ifndef _TOOLS_PERF_LINUX_BUG_H
+#define _TOOLS_PERF_LINUX_BUG_H
+
+/* Force a compilation error if condition is true, but also produce a
+   result (of value 0 and type size_t), so the expression can be used
+   e.g. in a structure initializer (or where-ever else comma expressions
+   aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#endif	/* _TOOLS_PERF_LINUX_BUG_H */
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
new file mode 100644
index 0000000..825d44f
--- /dev/null
+++ b/tools/include/linux/compiler-gcc.h
@@ -0,0 +1,21 @@
+#ifndef _TOOLS_LINUX_COMPILER_H_
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+#define GCC_VERSION (__GNUC__ * 10000		\
+		     + __GNUC_MINOR__ * 100	\
+		     + __GNUC_PATCHLEVEL__)
+
+#if GCC_VERSION >= 70000 && !defined(__CHECKER__)
+# define __fallthrough __attribute__ ((fallthrough))
+#endif
+
+#if GCC_VERSION >= 40300
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* GCC_VERSION >= 40300 */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index d94179f..23299d7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -1,6 +1,14 @@
 #ifndef _TOOLS_LINUX_COMPILER_H_
 #define _TOOLS_LINUX_COMPILER_H_
 
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
@@ -9,6 +17,11 @@
 # define __always_inline	inline __attribute__((always_inline))
 #endif
 
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
 #ifdef __ANDROID__
 /*
  * FIXME: Big hammer to get rid of tons of:
@@ -21,6 +34,8 @@
 #endif
 
 #define __user
+#define __rcu
+#define __read_mostly
 
 #ifndef __attribute_const__
 # define __attribute_const__
@@ -50,6 +65,8 @@
 # define unlikely(x)		__builtin_expect(!!(x), 0)
 #endif
 
+#define uninitialized_var(x) x = *(&(x))
+
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 #include <linux/types.h>
@@ -128,11 +145,7 @@
 
 
 #ifndef __fallthrough
-# if defined(__GNUC__) && __GNUC__ >= 7
-#  define __fallthrough __attribute__ ((fallthrough))
-# else
-#  define __fallthrough
-# endif
+# define __fallthrough
 #endif
 
 #endif /* _TOOLS_LINUX_COMPILER_H */
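With this split, __fallthrough expands to the fallthrough attribute only on gcc >= 7 (via compiler-gcc.h) and to nothing otherwise, so annotated switch statements build warning-free on both old and new compilers. A hypothetical use, with token_class as an illustrative name:

#include <linux/compiler.h>

static int token_class(int c)
{
	switch (c) {
	case '-':
		c = '+';
		__fallthrough;		/* deliberate: '-' is handled like '+' */
	case '+':
		return 1;
	default:
		return 0;
	}
}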
diff --git a/tools/include/linux/hashtable.h b/tools/include/linux/hashtable.h
index c65cc0a..251eabf 100644
--- a/tools/include/linux/hashtable.h
+++ b/tools/include/linux/hashtable.h
@@ -13,10 +13,6 @@
 #include <linux/hash.h>
 #include <linux/log2.h>
 
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
 #define DEFINE_HASHTABLE(name, bits)						\
 	struct hlist_head name[1 << (bits)] =					\
 			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 28607db..73ccc48 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -4,6 +4,11 @@
 #include <stdarg.h>
 #include <stddef.h>
 #include <assert.h>
+#include <linux/compiler.h>
+
+#ifndef UINT_MAX
+#define UINT_MAX	(~0U)
+#endif
 
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 
@@ -72,6 +77,8 @@
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
 int scnprintf(char * buf, size_t size, const char * fmt, ...);
 
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
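With __must_be_array() folded in, the tools ARRAY_SIZE() now rejects pointers at compile time instead of silently returning a bogus element count. A hypothetical illustration (it assumes tools/include is on the include path so <linux/bug.h> and <linux/kernel.h> resolve to the copies above):

#include <linux/bug.h>
#include <linux/kernel.h>
#include <stdio.h>

int main(void)
{
	int table[8];
	int *p = table;

	printf("%zu entries\n", ARRAY_SIZE(table));	/* prints 8 */
	/* ARRAY_SIZE(p) would now fail to build: &p[0] is not an array */
	(void)p;
	return 0;
}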
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index d5677d3..0325cef 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -12,6 +12,9 @@
 #ifndef _TOOLS_LINUX_LOG2_H
 #define _TOOLS_LINUX_LOG2_H
 
+#include <linux/bitops.h>
+#include <linux/types.h>
+
 /*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h
new file mode 100644
index 0000000..a0177c1
--- /dev/null
+++ b/tools/include/linux/refcount.h
@@ -0,0 +1,151 @@
+#ifndef _TOOLS_LINUX_REFCOUNT_H
+#define _TOOLS_LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+
+#ifdef NDEBUG
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#else
+#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
+#define __refcount_check	__must_check
+#endif
+
+typedef struct refcount_struct {
+	atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+	atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+	return atomic_read(&r->refs);
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		new = val + 1;
+
+		if (!val)
+			return false;
+
+		if (unlikely(!new))
+			return true;
+
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+
+#endif /* _TOOLS_LINUX_REFCOUNT_H */
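
For orientation, a minimal usage sketch against this new header, assuming the usual tools include paths so that <linux/refcount.h>, <linux/atomic.h> and <linux/kernel.h> resolve to the tools copies; the 'object' type and its free() policy are made up for the example:

	#include <linux/refcount.h>
	#include <stdlib.h>

	struct object {
		refcount_t ref;
		int payload;
	};

	static struct object *object_new(void)
	{
		struct object *obj = calloc(1, sizeof(*obj));

		if (obj)
			refcount_set(&obj->ref, 1);	/* caller owns the first reference */
		return obj;
	}

	static struct object *object_get(struct object *obj)
	{
		refcount_inc(&obj->ref);	/* caller must already hold a reference */
		return obj;
	}

	static void object_put(struct object *obj)
	{
		/* release ordering and the control dependency come from the API above */
		if (refcount_dec_and_test(&obj->ref))
			free(obj);
	}
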
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
new file mode 100644
index 0000000..58397dc
--- /dev/null
+++ b/tools/include/linux/spinlock.h
@@ -0,0 +1,5 @@
+#define spinlock_t		pthread_mutex_t
+#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+
+#define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
+#define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
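
This shim lets kernel-style locking code build in tools/ userspace without source changes; a rough sketch of what a consumer looks like (names invented for the example, <pthread.h> must be included by the user, and note the DEFINE_SPINLOCK() macro above already carries its own trailing semicolon):

	#include <pthread.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock)	/* expands to a pthread_mutex_t definition */
	static unsigned long demo_count;

	static void demo_bump(void)
	{
		unsigned long flags = 0;

		/* compiles to pthread_mutex_lock()/unlock(); 'flags' is simply ignored */
		spin_lock_irqsave(&demo_lock, flags);
		demo_count++;
		spin_unlock_irqrestore(&demo_lock, flags);
	}
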
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index 8ebf627..77a28a2 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -7,6 +7,7 @@
 
 #define __SANE_USERSPACE_TYPES__	/* For PPC64, to get LL64 types */
 #include <asm/types.h>
+#include <asm/posix_types.h>
 
 struct page;
 struct kmem_cache;
@@ -42,11 +43,7 @@
 #else
 #define __bitwise__
 #endif
-#ifdef __CHECK_ENDIAN__
 #define __bitwise __bitwise__
-#else
-#define __bitwise
-#endif
 
 #define __force
 #define __user
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 5827438..8c27db0 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __ASM_GENERIC_MMAN_COMMON_H */
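
These PKEY_* bits form the access-rights mask passed to the protection-key interfaces; a hedged userspace sketch, assuming a CPU/kernel with pkeys enabled and a libc that exposes the pkey_alloc()/pkey_mprotect() wrappers (otherwise the raw syscalls would be needed), with error handling trimmed:

	#define _GNU_SOURCE
	#include <sys/mman.h>

	static void *map_write_protected_via_pkey(size_t len)
	{
		/* Allocate a key whose default rights forbid writes through it. */
		int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
		void *p;

		if (pkey < 0)
			return NULL;

		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return NULL;

		/* Tag the mapping: reads are fine, writes fault until the rights change. */
		pkey_mprotect(p, len, PROT_READ | PROT_WRITE, pkey);
		return p;
	}
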
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 42dfbeb..a339bea 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -73,6 +73,8 @@
 	BPF_PROG_LOAD,
 	BPF_OBJ_PIN,
 	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
 };
 
 enum bpf_map_type {
@@ -95,8 +97,24 @@
 	BPF_PROG_TYPE_SCHED_ACT,
 	BPF_PROG_TYPE_TRACEPOINT,
 	BPF_PROG_TYPE_XDP,
+	BPF_PROG_TYPE_PERF_EVENT,
+	BPF_PROG_TYPE_CGROUP_SKB,
 };
 
+enum bpf_attach_type {
+	BPF_CGROUP_INET_INGRESS,
+	BPF_CGROUP_INET_EGRESS,
+	__MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
+/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
+ * to the given target_fd cgroup, descendant cgroups will be able to
+ * override the effective bpf program that was inherited from this cgroup.
+ */
+#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -106,6 +124,10 @@
 
 #define BPF_F_NO_PREALLOC	(1U << 0)
 
+/* Flags for accessing BPF object */
+#define BPF_F_RDONLY		(1U << 3)
+#define BPF_F_WRONLY		(1U << 4)
+
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
@@ -139,6 +161,14 @@
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
 		__aligned_u64	pathname;
 		__u32		bpf_fd;
+		__u32		file_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		__u32		target_fd;	/* container object to attach to */
+		__u32		attach_bpf_fd;	/* eBPF program to attach */
+		__u32		attach_type;
+		__u32		attach_flags;
 	};
 } __attribute__((aligned(8)));
 
@@ -376,40 +406,52 @@
 	BPF_FUNC_probe_write_user,
 
 	/**
-	 * int bpf_skb_change_tail(skb, len, flags)
-	 *     The helper will resize the skb to the given new size, to be used f.e.
-	 *     with control messages.
-	 *     @skb: pointer to skb
-	 *     @len: new skb length
-	 *     @flags: reserved
-	 *     Return: 0 on success or negative error
+	 * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+	 * @index: index of the cgroup in the bpf_map
+	 * Return:
+	 *   == 0 current failed the cgroup2 descendant test
+	 *   == 1 current succeeded the cgroup2 descendant test
+	 *    < 0 error
+	 */
+	BPF_FUNC_current_task_under_cgroup,
+
+	/**
+	 * bpf_skb_change_tail(skb, len, flags)
+	 * The helper will resize the skb to the given new size,
+	 * to be used f.e. with control messages.
+	 * @skb: pointer to skb
+	 * @len: new skb length
+	 * @flags: reserved
+	 * Return: 0 on success or negative error
 	 */
 	BPF_FUNC_skb_change_tail,
 
 	/**
-	 * int bpf_skb_pull_data(skb, len)
-	 *     The helper will pull in non-linear data in case the skb is non-linear
-	 *     and not all of len are part of the linear section. Only needed for
-	 *     read/write with direct packet access.
-	 *     @skb: pointer to skb
-	 *     @len: len to make read/writeable
-	 *     Return: 0 on success or negative error
+	 * bpf_skb_pull_data(skb, len)
+	 * The helper will pull in non-linear data in case the
+	 * skb is non-linear and not all of len are part of the
+	 * linear section. Only needed for read/write with direct
+	 * packet access.
+	 * @skb: pointer to skb
+	 * @len: len to make read/writeable
+	 * Return: 0 on success or negative error
 	 */
 	BPF_FUNC_skb_pull_data,
 
 	/**
-	 * s64 bpf_csum_update(skb, csum)
-	 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
-	 *     @skb: pointer to skb
-	 *     @csum: csum to add
-	 *     Return: csum on success or negative error
+	 * bpf_csum_update(skb, csum)
+	 * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+	 * @skb: pointer to skb
+	 * @csum: csum to add
+	 * Return: csum on success or negative error
 	 */
 	BPF_FUNC_csum_update,
 
 	/**
-	 * void bpf_set_hash_invalid(skb)
-	 *     Invalidate current skb->hash.
-	 *     @skb: pointer to skb
+	 * bpf_set_hash_invalid(skb)
+	 * Invalidate current skb->hash.
+	 * @skb: pointer to skb
 	 */
 	BPF_FUNC_set_hash_invalid,
 
@@ -457,12 +499,11 @@
 	BPF_FUNC_probe_read_str,
 
 	/**
-	 * u64 bpf_get_socket_cookie(skb)
-	 * Get the cookie for the socket stored inside sk_buff.
-	 * @skb: pointer to skb
-	 * Return: 8 Bytes non-decreasing number on success or 0 if
-	 * the socket
-	 * field is missing inside sk_buff
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8 Bytes non-decreasing number on success or 0 if the socket
+	 *     field is missing inside sk_buff
 	 */
 	BPF_FUNC_get_socket_cookie,
 
@@ -470,7 +511,8 @@
 	 * u32 bpf_get_socket_uid(skb)
 	 *     Get the owner uid of the socket stored inside sk_buff.
 	 *     @skb: pointer to skb
-	 *     Return: uid of the socket owner on success or overflowuid if failed.
+	 *     Return: uid of the socket owner on success or 0 if the socket pointer
+	 *     inside sk_buff is NULL
 	 */
 	BPF_FUNC_get_socket_uid,
 
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
new file mode 100644
index 0000000..beed138
--- /dev/null
+++ b/tools/include/uapi/linux/fcntl.h
@@ -0,0 +1,67 @@
+#ifndef _UAPI_LINUX_FCNTL_H
+#define _UAPI_LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#define F_SETLEASE	(F_LINUX_SPECIFIC_BASE + 0)
+#define F_GETLEASE	(F_LINUX_SPECIFIC_BASE + 1)
+
+/*
+ * Cancel a blocking posix lock; internal use only until we expose an
+ * asynchronous lock api to userspace:
+ */
+#define F_CANCELLK	(F_LINUX_SPECIFIC_BASE + 5)
+
+/* Create a file descriptor with FD_CLOEXEC set. */
+#define F_DUPFD_CLOEXEC	(F_LINUX_SPECIFIC_BASE + 6)
+
+/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY	(F_LINUX_SPECIFIC_BASE+2)
+
+/*
+ * Set and get of pipe page size array
+ */
+#define F_SETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 7)
+#define F_GETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 8)
+
+/*
+ * Set/Get seals
+ */
+#define F_ADD_SEALS	(F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS	(F_LINUX_SPECIFIC_BASE + 10)
+
+/*
+ * Types of seals
+ */
+#define F_SEAL_SEAL	0x0001	/* prevent further seals from being set */
+#define F_SEAL_SHRINK	0x0002	/* prevent file from shrinking */
+#define F_SEAL_GROW	0x0004	/* prevent file from growing */
+#define F_SEAL_WRITE	0x0008	/* prevent writes */
+/* (1U << 31) is reserved for signed error codes */
+
+/*
+ * Types of directory notifications that may be requested.
+ */
+#define DN_ACCESS	0x00000001	/* File accessed */
+#define DN_MODIFY	0x00000002	/* File modified */
+#define DN_CREATE	0x00000004	/* File created */
+#define DN_DELETE	0x00000008	/* File removed */
+#define DN_RENAME	0x00000010	/* File renamed */
+#define DN_ATTRIB	0x00000020	/* File changed attributes */
+#define DN_MULTISHOT	0x80000000	/* Don't remove notifier */
+
+#define AT_FDCWD		-100    /* Special value used to indicate
+                                           openat should use the current
+                                           working directory. */
+#define AT_SYMLINK_NOFOLLOW	0x100   /* Do not follow symbolic links.  */
+#define AT_REMOVEDIR		0x200   /* Remove directory instead of
+                                           unlinking file.  */
+#define AT_SYMLINK_FOLLOW	0x400   /* Follow symbolic links.  */
+#define AT_NO_AUTOMOUNT		0x800	/* Suppress terminal automount traversal */
+#define AT_EMPTY_PATH		0x1000	/* Allow empty relative pathname */
+
+
+#endif /* _UAPI_LINUX_FCNTL_H */
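
The new F_ADD_SEALS/F_GET_SEALS commands are normally exercised on memfds; a hedged sketch assuming memfd_create() is available from the libc (glibc 2.27+ exposes it in <sys/mman.h>):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static int make_sealed_buffer(size_t size)
	{
		int fd = memfd_create("sealed-buf", MFD_ALLOW_SEALING);

		if (fd < 0)
			return -1;

		if (ftruncate(fd, size) < 0)
			return -1;

		/* Once sealed, the file can no longer grow, shrink or be written. */
		if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_WRITE) < 0)
			return -1;

		return fd;
	}
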
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
new file mode 100644
index 0000000..7fec7e3
--- /dev/null
+++ b/tools/include/uapi/linux/stat.h
@@ -0,0 +1,45 @@
+#ifndef _UAPI_LINUX_STAT_H
+#define _UAPI_LINUX_STAT_H
+
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#define S_IFMT  00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK	 0120000
+#define S_IFREG  0100000
+#define S_IFBLK  0060000
+#define S_IFDIR  0040000
+#define S_IFCHR  0020000
+#define S_IFIFO  0010000
+#define S_ISUID  0004000
+#define S_ISGID  0002000
+#define S_ISVTX  0001000
+
+#define S_ISLNK(m)	(((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m)	(((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m)	(((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m)	(((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m)	(((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m)	(((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m)	(((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#endif
+
+
+#endif /* _UAPI_LINUX_STAT_H */
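
The S_IS*() predicates are the usual way to consume st_mode; a small sketch using the libc <sys/stat.h>, which carries the same definitions as this uapi copy:

	#include <stdio.h>
	#include <sys/stat.h>

	static void describe(const char *path)
	{
		struct stat st;

		if (lstat(path, &st))		/* lstat so symlinks are not followed */
			return;

		if (S_ISREG(st.st_mode))
			printf("%s: regular file, mode %04o\n", path,
			       (unsigned int)(st.st_mode & 07777));
		else if (S_ISDIR(st.st_mode))
			printf("%s: directory\n", path);
		else if (S_ISLNK(st.st_mode))
			printf("%s: symbolic link\n", path);
	}
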
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b699aea..7788cfb 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -590,6 +590,24 @@
 	return 0;
 }
 
+static bool section_have_execinstr(struct bpf_object *obj, int idx)
+{
+	Elf_Scn *scn;
+	GElf_Shdr sh;
+
+	scn = elf_getscn(obj->efile.elf, idx);
+	if (!scn)
+		return false;
+
+	if (gelf_getshdr(scn, &sh) != &sh)
+		return false;
+
+	if (sh.sh_flags & SHF_EXECINSTR)
+		return true;
+
+	return false;
+}
+
 static int bpf_object__elf_collect(struct bpf_object *obj)
 {
 	Elf *elf = obj->efile.elf;
@@ -673,6 +691,14 @@
 		} else if (sh.sh_type == SHT_REL) {
 			void *reloc = obj->efile.reloc;
 			int nr_reloc = obj->efile.nr_reloc + 1;
+			int sec = sh.sh_info; /* points to other section */
+
+			/* Only do relo for section with exec instructions */
+			if (!section_have_execinstr(obj, sec)) {
+				pr_debug("skip relo %s(%d) for section(%d)\n",
+					 name, idx, sec);
+				continue;
+			}
 
 			reloc = realloc(reloc,
 					sizeof(*obj->efile.reloc) * nr_reloc);
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index 9122a9e..6d8b8f2 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -82,3 +82,28 @@
 	return size;
 }
 #endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+	unsigned long idx;
+
+	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+		if (addr[idx] != ~0UL)
+			return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
+	}
+
+	return size;
+}
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
+{
+	return _find_next_bit(addr, size, offset, ~0UL);
+}
+#endif
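
A small illustration of the new helpers (the declarations are assumed to come from the tools bitops headers, e.g. <linux/bitmap.h>, and the sizes assume 64-bit longs):

	#include <linux/bitmap.h>

	static void zero_bit_demo(void)
	{
		/* bits 0..63 set, bits 64..127 clear */
		unsigned long map[2] = { ~0UL, 0UL };

		unsigned long first = find_first_zero_bit(map, 128);	/* -> 64 */
		unsigned long next  = find_next_zero_bit(map, 128, 70);	/* -> 70 */

		(void)first;
		(void)next;
	}
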
diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
index 503ae07..9ab2d0a 100644
--- a/tools/lib/str_error_r.c
+++ b/tools/lib/str_error_r.c
@@ -21,6 +21,6 @@
 {
 	int err = strerror_r(errnum, buf, buflen);
 	if (err)
-		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
 	return buf;
 }
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index 6518bea..68af60f 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -29,10 +29,13 @@
 	 * have real input
 	 */
 	fd_set in;
+	fd_set exception;
 
 	FD_ZERO(&in);
+	FD_ZERO(&exception);
 	FD_SET(0, &in);
-	select(1, &in, NULL, &in, NULL);
+	FD_SET(0, &exception);
+	select(1, &in, NULL, &exception, NULL);
 
 	setenv("LESS", "FRSX", 0);
 }
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 664c90c..6694753 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4927,21 +4927,22 @@
 				else
 					ls = 2;
 
-				if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
-				    *(ptr+1) == 'S' || *(ptr+1) == 's') {
+				if (isalnum(ptr[1]))
 					ptr++;
+
+				if (*ptr == 'F' || *ptr == 'f' ||
+				    *ptr == 'S' || *ptr == 's') {
 					show_func = *ptr;
-				} else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
-					print_mac_arg(s, *(ptr+1), data, size, event, arg);
-					ptr++;
+				} else if (*ptr == 'M' || *ptr == 'm') {
+					print_mac_arg(s, *ptr, data, size, event, arg);
 					arg = arg->next;
 					break;
-				} else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+				} else if (*ptr == 'I' || *ptr == 'i') {
 					int n;
 
-					n = print_ip_arg(s, ptr+1, data, size, event, arg);
+					n = print_ip_arg(s, ptr, data, size, event, arg);
 					if (n > 0) {
-						ptr += n;
+						ptr += n - 1;
 						arg = arg->next;
 						break;
 					}
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 7c214ce..5e10ba7 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1879,17 +1879,25 @@
 	struct pevent *pevent;
 	unsigned long long addr;
 	const char *val = NULL;
+	unsigned int size;
 	char hex[64];
 
 	/* If the field is not a string convert it */
 	if (arg->str.field->flags & FIELD_IS_STRING) {
 		val = record->data + arg->str.field->offset;
+		size = arg->str.field->size;
+
+		if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
+			addr = *(unsigned int *)val;
+			val = record->data + (addr & 0xffff);
+			size = addr >> 16;
+		}
 
 		/*
 		 * We need to copy the data since we can't be sure the field
 		 * is null terminated.
 		 */
-		if (*(val + arg->str.field->size - 1)) {
+		if (*(val + size - 1)) {
 			/* copy it */
 			memcpy(arg->str.buffer, val, arg->str.field->size);
 			/* the buffer is already NULL terminated */
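
For reference, the FIELD_IS_DYNAMIC branch above follows the usual __data_loc packing: the 32-bit value stored in the field holds the offset of the string within the record in its low 16 bits and its length in the high 16 bits. A hypothetical helper spelling out that decode:

	/* Hypothetical helper mirroring the decode above. */
	static const char *dynamic_field_str(const void *record_data,
					     unsigned int data_loc,
					     unsigned int *lenp)
	{
		unsigned int offset = data_loc & 0xffff;	/* where the bytes start */
		unsigned int len    = data_loc >> 16;		/* how many bytes */

		*lenp = len;
		return (const char *)record_data + offset;
	}
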
diff --git a/tools/objtool/Build b/tools/objtool/Build
index d6cdece..749becd 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -1,5 +1,9 @@
 objtool-y += arch/$(SRCARCH)/
 objtool-y += builtin-check.o
+objtool-y += builtin-orc.o
+objtool-y += check.o
+objtool-y += orc_gen.o
+objtool-y += orc_dump.o
 objtool-y += elf.o
 objtool-y += special.o
 objtool-y += objtool.o
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 05536d8..3995735 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -11,9 +11,6 @@
 It enforces a set of rules on asm code and C inline assembly code so
 that stack traces can be reliable.
 
-Currently it only checks frame pointer usage, but there are plans to add
-CFI validation for C files and CFI generation for asm files.
-
 For each function, it recursively follows all possible code paths and
 validates the correct frame pointer state at each instruction.
 
@@ -23,6 +20,10 @@
 instructions).  Similarly, it knows how to follow switch statements, for
 which gcc sometimes uses jump tables.
 
+(Objtool also has an 'orc generate' subcommand which generates debuginfo
+for the ORC unwinder.  See Documentation/x86/orc-unwinder.txt in the
+kernel tree for more details.)
+
 
 Why do we need stack metadata validation?
 -----------------------------------------
@@ -93,62 +94,24 @@
        or at the very end of the function after the stack frame has been
        destroyed.  This is an inherent limitation of frame pointers.
 
-b) 100% reliable stack traces for DWARF enabled kernels
+b) ORC (Oops Rewind Capability) unwind table generation
 
-   (NOTE: This is not yet implemented)
+   An alternative to frame pointers and DWARF, ORC unwind data can be
+   used to walk the stack.  Unlike frame pointers, ORC data is out of
+   band.  So it doesn't affect runtime performance and it can be
+   reliable even when interrupts or exceptions are involved.
 
-   As an alternative to frame pointers, DWARF Call Frame Information
-   (CFI) metadata can be used to walk the stack.  Unlike frame pointers,
-   CFI metadata is out of band.  So it doesn't affect runtime
-   performance and it can be reliable even when interrupts or exceptions
-   are involved.
-
-   For C code, gcc automatically generates DWARF CFI metadata.  But for
-   asm code, generating CFI is a tedious manual approach which requires
-   manually placed .cfi assembler macros to be scattered throughout the
-   code.  It's clumsy and very easy to get wrong, and it makes the real
-   code harder to read.
-
-   Stacktool will improve this situation in several ways.  For code
-   which already has CFI annotations, it will validate them.  For code
-   which doesn't have CFI annotations, it will generate them.  So an
-   architecture can opt to strip out all the manual .cfi annotations
-   from their asm code and have objtool generate them instead.
-
-   We might also add a runtime stack validation debug option where we
-   periodically walk the stack from schedule() and/or an NMI to ensure
-   that the stack metadata is sane and that we reach the bottom of the
-   stack.
-
-   So the benefit of objtool here will be that external tooling should
-   always show perfect stack traces.  And the same will be true for
-   kernel warning/oops traces if the architecture has a runtime DWARF
-   unwinder.
+   For more details, see Documentation/x86/orc-unwinder.txt.
 
 c) Higher live patching compatibility rate
 
-   (NOTE: This is not yet implemented)
+   Livepatch has an optional "consistency model", which is needed for
+   more complex patches.  In order for the consistency model to work,
+   stack traces need to be reliable (or an unreliable condition needs to
+   be detectable).  Objtool makes that possible.
 
-   Currently with CONFIG_LIVEPATCH there's a basic live patching
-   framework which is safe for roughly 85-90% of "security" fixes.  But
-   patches can't have complex features like function dependency or
-   prototype changes, or data structure changes.
-
-   There's a strong need to support patches which have the more complex
-   features so that the patch compatibility rate for security fixes can
-   eventually approach something resembling 100%.  To achieve that, a
-   "consistency model" is needed, which allows tasks to be safely
-   transitioned from an unpatched state to a patched state.
-
-   One of the key requirements of the currently proposed livepatch
-   consistency model [*] is that it needs to walk the stack of each
-   sleeping task to determine if it can be transitioned to the patched
-   state.  If objtool can ensure that stack traces are reliable, this
-   consistency model can be used and the live patching compatibility
-   rate can be improved significantly.
-
-   [*] https://lkml.kernel.org/r/cover.1423499826.git.jpoimboe@redhat.com
-
+   For more details, see the livepatch documentation in the Linux kernel
+   source tree at Documentation/livepatch/livepatch.txt.
 
 Rules
 -----
@@ -201,80 +164,84 @@
    return normally.
 
 
-Errors in .S files
-------------------
+Objtool warnings
+----------------
 
-If you're getting an error in a compiled .S file which you don't
-understand, first make sure that the affected code follows the above
-rules.
+For asm files, if you're getting an error which doesn't make sense,
+first make sure that the affected code follows the above rules.
+
+For C files, the common culprits are inline asm statements and calls to
+"noreturn" functions.  See below for more details.
+
+Another possible cause for errors in C code is if the Makefile removes
+-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
 
 Here are some examples of common warnings reported by objtool, what
 they mean, and suggestions for how to fix them.
 
 
-1. asm_file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
+1. file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
 
    The func() function made a function call without first saving and/or
-   updating the frame pointer.
+   updating the frame pointer, and CONFIG_FRAME_POINTER is enabled.
 
-   If func() is indeed a callable function, add proper frame pointer
-   logic using the FRAME_BEGIN and FRAME_END macros.  Otherwise, remove
-   its ELF function annotation by changing ENDPROC to END.
+   If the error is for an asm file, and func() is indeed a callable
+   function, add proper frame pointer logic using the FRAME_BEGIN and
+   FRAME_END macros.  Otherwise, if it's not a callable function, remove
+   its ELF function annotation by changing ENDPROC to END, and instead
+   use the manual unwind hint macros in asm/unwind_hints.h.
 
-   If you're getting this error in a .c file, see the "Errors in .c
-   files" section.
+   If it's a GCC-compiled .c file, the error may be because the function
+   uses an inline asm() statement which has a "call" instruction.  An
+   asm() statement with a call instruction must declare the use of the
+   stack pointer in its output operand.  On x86_64, this means adding
+   the ASM_CALL_CONSTRAINT as an output constraint:
+
+     asm volatile("call func" : ASM_CALL_CONSTRAINT);
+
+   Otherwise the stack frame may not get created before the call.
 
 
-2. asm_file.o: warning: objtool: .text+0x53: return instruction outside of a callable function
+2. file.o: warning: objtool: .text+0x53: unreachable instruction
 
-   A return instruction was detected, but objtool couldn't find a way
-   for a callable function to reach the instruction.
+   Objtool couldn't find a code path to reach the instruction.
 
-   If the return instruction is inside (or reachable from) a callable
-   function, the function needs to be annotated with the ENTRY/ENDPROC
-   macros.
+   If the error is for an asm file, and the instruction is inside (or
+   reachable from) a callable function, the function should be annotated
+   with the ENTRY/ENDPROC macros (ENDPROC is the important one).
+   Otherwise, the code should probably be annotated with the unwind hint
+   macros in asm/unwind_hints.h so objtool and the unwinder can know the
+   stack state associated with the code.
 
-   If you _really_ need a return instruction outside of a function, and
-   are 100% sure that it won't affect stack traces, you can tell
-   objtool to ignore it.  See the "Adding exceptions" section below.
-
-
-3. asm_file.o: warning: objtool: func()+0x9: function has unreachable instruction
-
-   The instruction lives inside of a callable function, but there's no
-   possible control flow path from the beginning of the function to the
-   instruction.
-
-   If the instruction is actually needed, and it's actually in a
-   callable function, ensure that its function is properly annotated
-   with ENTRY/ENDPROC.
+   If you're 100% sure the code won't affect stack traces, or if you're
+   just a bad person, you can tell objtool to ignore it.  See the
+   "Adding exceptions" section below.
 
    If it's not actually in a callable function (e.g. kernel entry code),
    change ENDPROC to END.
 
 
-4. asm_file.o: warning: objtool: func(): can't find starting instruction
+4. file.o: warning: objtool: func(): can't find starting instruction
    or
-   asm_file.o: warning: objtool: func()+0x11dd: can't decode instruction
+   file.o: warning: objtool: func()+0x11dd: can't decode instruction
 
-   Did you put data in a text section?  If so, that can confuse
+   Does the file have data in a text section?  If so, that can confuse
    objtool's instruction decoder.  Move the data to a more appropriate
    section like .data or .rodata.
 
 
-5. asm_file.o: warning: objtool: func()+0x6: kernel entry/exit from callable instruction
+5. file.o: warning: objtool: func()+0x6: unsupported instruction in callable function
 
-   This is a kernel entry/exit instruction like sysenter or sysret.
-   Such instructions aren't allowed in a callable function, and are most
-   likely part of the kernel entry code.
-
-   If the instruction isn't actually in a callable function, change
-   ENDPROC to END.
+   This is a kernel entry/exit instruction like sysenter or iret.  Such
+   instructions aren't allowed in a callable function, and are most
+   likely part of the kernel entry code.  They should usually not have
+   the callable function annotation (ENDPROC) and should always be
+   annotated with the unwind hint macros in asm/unwind_hints.h.
 
 
-6. asm_file.o: warning: objtool: func()+0x26: sibling call from callable instruction with changed frame pointer
+6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
 
-   This is a dynamic jump or a jump to an undefined symbol.  Stacktool
+   This is a dynamic jump or a jump to an undefined symbol.  Objtool
    assumed it's a sibling call and detected that the frame pointer
    wasn't first restored to its original state.
 
@@ -282,24 +249,28 @@
    destination code to the local file.
 
    If the instruction is not actually in a callable function (e.g.
-   kernel entry code), change ENDPROC to END.
+   kernel entry code), change ENDPROC to END and annotate manually with
+   the unwind hint macros in asm/unwind_hints.h.
 
 
-7. asm_file: warning: objtool: func()+0x5c: frame pointer state mismatch
+7. file: warning: objtool: func()+0x5c: stack state mismatch
 
    The instruction's frame pointer state is inconsistent, depending on
    which execution path was taken to reach the instruction.
 
-   Make sure the function pushes and sets up the frame pointer (for
-   x86_64, this means rbp) at the beginning of the function and pops it
-   at the end of the function.  Also make sure that no other code in the
-   function touches the frame pointer.
+   Make sure that, when CONFIG_FRAME_POINTER is enabled, the function
+   pushes and sets up the frame pointer (for x86_64, this means rbp) at
+   the beginning of the function and pops it at the end of the function.
+   Also make sure that no other code in the function touches the frame
+   pointer.
+
+   Another possibility is that the code has some asm or inline asm which
+   does some unusual things to the stack or the frame pointer.  In such
+   cases it's probably appropriate to use the unwind hint macros in
+   asm/unwind_hints.h.
 
 
-Errors in .c files
-------------------
-
-1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
+8. file.o: warning: objtool: funcA() falls through to next function funcB()
 
    This means that funcA() doesn't end with a return instruction or an
    unconditional jump, and that objtool has determined that the function
@@ -318,21 +289,6 @@
       might be corrupt due to a gcc bug.  For more details, see:
       https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
 
-2. If you're getting any other objtool error in a compiled .c file, it
-   may be because the file uses an asm() statement which has a "call"
-   instruction.  An asm() statement with a call instruction must declare
-   the use of the stack pointer in its output operand.  On x86_64, this
-   means adding the ASM_CALL_CONSTRAINT as an output constraint:
-
-     asm volatile("call func" : ASM_CALL_CONSTRAINT);
-
-   Otherwise the stack frame may not get created before the call.
-
-3. Another possible cause for errors in C code is if the Makefile removes
-   -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
-
-Also see the above section for .S file errors for more information what
-the individual error messages mean.
 
 If the error doesn't seem to make sense, it could be a bug in objtool.
 Feel free to ask the objtool maintainer for help.
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 041b493..e6acc28 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 include ../scripts/Makefile.include
 include ../scripts/Makefile.arch
 
@@ -6,17 +7,19 @@
 endif
 
 # always use the host compiler
-CC = gcc
-LD = ld
-AR = ar
+HOSTCC	?= gcc
+HOSTLD	?= ld
+CC	 = $(HOSTCC)
+LD	 = $(HOSTLD)
+AR	 = ar
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
 SUBCMD_SRCDIR		= $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT	= $(if $(OUTPUT),$(OUTPUT),$(PWD)/)
+LIBSUBCMD_OUTPUT	= $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
 LIBSUBCMD		= $(LIBSUBCMD_OUTPUT)libsubcmd.a
 
 OBJTOOL    := $(OUTPUT)objtool
@@ -24,8 +27,11 @@
 
 all: $(OBJTOOL)
 
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
-CFLAGS   += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+INCLUDES := -I$(srctree)/tools/include \
+	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+	    -I$(srctree)/tools/objtool/arch/$(ARCH)/include
+WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
+CFLAGS   += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
 
 # Allow old libelf to be used:
@@ -39,19 +45,8 @@
 $(OBJTOOL_IN): fixdep FORCE
 	@$(MAKE) $(build)=objtool
 
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-	@(diff -I 2>&1 | grep -q 'option requires an argument' && \
-	test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-	diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
-	diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
-	diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
-	diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
-	|| echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
+	@$(CONFIG_SHELL) ./sync-check.sh
 	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
@@ -61,7 +56,7 @@
 clean:
 	$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
 	$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-	$(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+	$(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
 
 FORCE:
 
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index f7350fc..b0d7dc3 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -19,26 +19,64 @@
 #define _ARCH_H
 
 #include <stdbool.h>
+#include <linux/list.h>
 #include "elf.h"
+#include "cfi.h"
 
-#define INSN_FP_SAVE		1
-#define INSN_FP_SETUP		2
-#define INSN_FP_RESTORE		3
-#define INSN_JUMP_CONDITIONAL	4
-#define INSN_JUMP_UNCONDITIONAL	5
-#define INSN_JUMP_DYNAMIC	6
-#define INSN_CALL		7
-#define INSN_CALL_DYNAMIC	8
-#define INSN_RETURN		9
-#define INSN_CONTEXT_SWITCH	10
-#define INSN_BUG		11
-#define INSN_NOP		12
-#define INSN_OTHER		13
+#define INSN_JUMP_CONDITIONAL	1
+#define INSN_JUMP_UNCONDITIONAL	2
+#define INSN_JUMP_DYNAMIC	3
+#define INSN_CALL		4
+#define INSN_CALL_DYNAMIC	5
+#define INSN_RETURN		6
+#define INSN_CONTEXT_SWITCH	7
+#define INSN_STACK		8
+#define INSN_BUG		9
+#define INSN_NOP		10
+#define INSN_OTHER		11
 #define INSN_LAST		INSN_OTHER
 
+enum op_dest_type {
+	OP_DEST_REG,
+	OP_DEST_REG_INDIRECT,
+	OP_DEST_MEM,
+	OP_DEST_PUSH,
+	OP_DEST_LEAVE,
+};
+
+struct op_dest {
+	enum op_dest_type type;
+	unsigned char reg;
+	int offset;
+};
+
+enum op_src_type {
+	OP_SRC_REG,
+	OP_SRC_REG_INDIRECT,
+	OP_SRC_CONST,
+	OP_SRC_POP,
+	OP_SRC_ADD,
+	OP_SRC_AND,
+};
+
+struct op_src {
+	enum op_src_type type;
+	unsigned char reg;
+	int offset;
+};
+
+struct stack_op {
+	struct op_dest dest;
+	struct op_src src;
+};
+
+void arch_initial_func_cfi_state(struct cfi_state *state);
+
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    unsigned int *len, unsigned char *type,
-			    unsigned long *displacement);
+			    unsigned long *immediate, struct stack_op *op);
+
+bool arch_callee_saved_reg(unsigned char reg);
 
 #endif /* _ARCH_H */
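
To make the new interface concrete: with this scheme, a plain 'push %rbp' in a prologue would be handed back to the checker roughly as the stack_op below (CFI_BP is expected to come from the new cfi.h header; shown purely for illustration):

	/* push %rbp: the source is the register, the destination is a push */
	struct stack_op op = {
		.src  = { .type = OP_SRC_REG,  .reg = CFI_BP },
		.dest = { .type = OP_DEST_PUSH },
	};
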
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index debbdb0..b998412 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,12 +1,12 @@
 objtool-y += decode.o
 
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
 
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
 	$(call rule_mkdir)
 	$(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
 
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
 
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 039636f..540a209 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -19,14 +19,25 @@
 #include <stdlib.h>
 
 #define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
 
 #include "../../elf.h"
 #include "../../arch.h"
 #include "../../warn.h"
 
+static unsigned char op_to_cfi_reg[][2] = {
+	{CFI_AX, CFI_R8},
+	{CFI_CX, CFI_R9},
+	{CFI_DX, CFI_R10},
+	{CFI_BX, CFI_R11},
+	{CFI_SP, CFI_R12},
+	{CFI_BP, CFI_R13},
+	{CFI_SI, CFI_R14},
+	{CFI_DI, CFI_R15},
+};
+
 static int is_x86_64(struct elf *elf)
 {
 	switch (elf->ehdr.e_machine) {
@@ -40,24 +51,50 @@
 	}
 }
 
+bool arch_callee_saved_reg(unsigned char reg)
+{
+	switch (reg) {
+	case CFI_BP:
+	case CFI_BX:
+	case CFI_R12:
+	case CFI_R13:
+	case CFI_R14:
+	case CFI_R15:
+		return true;
+
+	case CFI_AX:
+	case CFI_CX:
+	case CFI_DX:
+	case CFI_SI:
+	case CFI_DI:
+	case CFI_SP:
+	case CFI_R8:
+	case CFI_R9:
+	case CFI_R10:
+	case CFI_R11:
+	case CFI_RA:
+	default:
+		return false;
+	}
+}
+
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    unsigned int *len, unsigned char *type,
-			    unsigned long *immediate)
+			    unsigned long *immediate, struct stack_op *op)
 {
 	struct insn insn;
-	int x86_64;
-	unsigned char op1, op2, ext;
+	int x86_64, sign;
+	unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
+		      rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
+		      modrm_reg = 0, sib = 0;
 
 	x86_64 = is_x86_64(elf);
 	if (x86_64 == -1)
 		return -1;
 
-	insn_init(&insn, (void *)(sec->data + offset), maxlen, x86_64);
+	insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
 	insn_get_length(&insn);
-	insn_get_opcode(&insn);
-	insn_get_modrm(&insn);
-	insn_get_immediate(&insn);
 
 	if (!insn_complete(&insn)) {
 		WARN_FUNC("can't decode instruction", sec, offset);
@@ -73,70 +110,317 @@
 	op1 = insn.opcode.bytes[0];
 	op2 = insn.opcode.bytes[1];
 
+	if (insn.rex_prefix.nbytes) {
+		rex = insn.rex_prefix.bytes[0];
+		rex_w = X86_REX_W(rex) >> 3;
+		rex_r = X86_REX_R(rex) >> 2;
+		rex_x = X86_REX_X(rex) >> 1;
+		rex_b = X86_REX_B(rex);
+	}
+
+	if (insn.modrm.nbytes) {
+		modrm = insn.modrm.bytes[0];
+		modrm_mod = X86_MODRM_MOD(modrm);
+		modrm_reg = X86_MODRM_REG(modrm);
+		modrm_rm = X86_MODRM_RM(modrm);
+	}
+
+	if (insn.sib.nbytes)
+		sib = insn.sib.bytes[0];
+
 	switch (op1) {
-	case 0x55:
-		if (!insn.rex_prefix.nbytes)
-			/* push rbp */
-			*type = INSN_FP_SAVE;
+
+	case 0x1:
+	case 0x29:
+		if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+			/* add/sub reg, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+		}
 		break;
 
-	case 0x5d:
-		if (!insn.rex_prefix.nbytes)
-			/* pop rbp */
-			*type = INSN_FP_RESTORE;
+	case 0x50 ... 0x57:
+
+		/* push reg */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_REG;
+		op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+		op->dest.type = OP_DEST_PUSH;
+
+		break;
+
+	case 0x58 ... 0x5f:
+
+		/* pop reg */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_REG;
+		op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+
+		break;
+
+	case 0x68:
+	case 0x6a:
+		/* push immediate */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_CONST;
+		op->dest.type = OP_DEST_PUSH;
 		break;
 
 	case 0x70 ... 0x7f:
 		*type = INSN_JUMP_CONDITIONAL;
 		break;
 
+	case 0x81:
+	case 0x83:
+		if (rex != 0x48)
+			break;
+
+		if (modrm == 0xe4) {
+			/* and imm, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_AND;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.immediate.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		if (modrm == 0xc4)
+			sign = 1;
+		else if (modrm == 0xec)
+			sign = -1;
+		else
+			break;
+
+		/* add/sub imm, %rsp */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_ADD;
+		op->src.reg = CFI_SP;
+		op->src.offset = insn.immediate.value * sign;
+		op->dest.type = OP_DEST_REG;
+		op->dest.reg = CFI_SP;
+		break;
+
 	case 0x89:
-		if (insn.rex_prefix.nbytes == 1 &&
-		    insn.rex_prefix.bytes[0] == 0x48 &&
-		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0xe5)
-			/* mov rsp, rbp */
-			*type = INSN_FP_SETUP;
+		if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
+
+			/* mov %rsp, reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = CFI_SP;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
+			break;
+		}
+
+		if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+			/* mov reg, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		/* fallthrough */
+	case 0x88:
+		if (!rex_b &&
+		    (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
+
+			/* mov reg, disp(%rbp) */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG_INDIRECT;
+			op->dest.reg = CFI_BP;
+			op->dest.offset = insn.displacement.value;
+
+		} else if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
+
+			/* mov reg, disp(%rsp) */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG_INDIRECT;
+			op->dest.reg = CFI_SP;
+			op->dest.offset = insn.displacement.value;
+		}
+
+		break;
+
+	case 0x8b:
+		if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
+
+			/* mov disp(%rbp), reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG_INDIRECT;
+			op->src.reg = CFI_BP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+		} else if (rex_w && !rex_b && sib == 0x24 &&
+			   modrm_mod != 3 && modrm_rm == 4) {
+
+			/* mov disp(%rsp), reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG_INDIRECT;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+		}
+
 		break;
 
 	case 0x8d:
-		if (insn.rex_prefix.nbytes &&
-		    insn.rex_prefix.bytes[0] == 0x48 &&
-		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
-		    insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
-			/* lea %(rsp), %rbp */
-			*type = INSN_FP_SETUP;
+		if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
+
+			*type = INSN_STACK;
+			if (!insn.displacement.value) {
+				/* lea (%rsp), reg */
+				op->src.type = OP_SRC_REG;
+			} else {
+				/* lea disp(%rsp), reg */
+				op->src.type = OP_SRC_ADD;
+				op->src.offset = insn.displacement.value;
+			}
+			op->src.reg = CFI_SP;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+		} else if (rex == 0x48 && modrm == 0x65) {
+
+			/* lea disp(%rbp), %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_BP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+
+		} else if (rex == 0x49 && modrm == 0x62 &&
+			   insn.displacement.value == -8) {
+
+			/*
+			 * lea -0x8(%r10), %rsp
+			 *
+			 * Restoring rsp back to its original value after a
+			 * stack realignment.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_R10;
+			op->src.offset = -8;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+
+		} else if (rex == 0x49 && modrm == 0x65 &&
+			   insn.displacement.value == -16) {
+
+			/*
+			 * lea -0x10(%r13), %rsp
+			 *
+			 * Restoring rsp back to its original value after a
+			 * stack realignment.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_R13;
+			op->src.offset = -16;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+		}
+
+		break;
+
+	case 0x8f:
+		/* pop to mem */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_MEM;
 		break;
 
 	case 0x90:
 		*type = INSN_NOP;
 		break;
 
+	case 0x9c:
+		/* pushf */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_CONST;
+		op->dest.type = OP_DEST_PUSH;
+		break;
+
+	case 0x9d:
+		/* popf */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_MEM;
+		break;
+
 	case 0x0f:
-		if (op2 >= 0x80 && op2 <= 0x8f)
+
+		if (op2 >= 0x80 && op2 <= 0x8f) {
+
 			*type = INSN_JUMP_CONDITIONAL;
-		else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
-			 op2 == 0x35)
+
+		} else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
+			   op2 == 0x35) {
+
 			/* sysenter, sysret */
 			*type = INSN_CONTEXT_SWITCH;
-		else if (op2 == 0x0b || op2 == 0xb9)
+
+		} else if (op2 == 0x0b || op2 == 0xb9) {
+
 			/* ud2 */
 			*type = INSN_BUG;
-		else if (op2 == 0x0d || op2 == 0x1f)
+
+		} else if (op2 == 0x0d || op2 == 0x1f) {
+
 			/* nopl/nopw */
 			*type = INSN_NOP;
-		else if (op2 == 0x01 && insn.modrm.nbytes &&
-			 (insn.modrm.bytes[0] == 0xc2 ||
-			  insn.modrm.bytes[0] == 0xd8))
-			/* vmlaunch, vmrun */
-			*type = INSN_CONTEXT_SWITCH;
+
+		} else if (op2 == 0xa0 || op2 == 0xa8) {
+
+			/* push fs/gs */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_CONST;
+			op->dest.type = OP_DEST_PUSH;
+
+		} else if (op2 == 0xa1 || op2 == 0xa9) {
+
+			/* pop fs/gs */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_POP;
+			op->dest.type = OP_DEST_MEM;
+		}
 
 		break;
 
-	case 0xc9: /* leave */
-		*type = INSN_FP_RESTORE;
+	case 0xc9:
+		/*
+		 * leave
+		 *
+		 * equivalent to:
+		 * mov bp, sp
+		 * pop bp
+		 */
+		*type = INSN_STACK;
+		op->dest.type = OP_DEST_LEAVE;
+
 		break;
 
-	case 0xe3: /* jecxz/jrcxz */
+	case 0xe3:
+		/* jecxz/jrcxz */
 		*type = INSN_JUMP_CONDITIONAL;
 		break;
 
@@ -161,14 +445,27 @@
 		break;
 
 	case 0xff:
-		ext = X86_MODRM_REG(insn.modrm.bytes[0]);
-		if (ext == 2 || ext == 3)
+		if (modrm_reg == 2 || modrm_reg == 3)
+
 			*type = INSN_CALL_DYNAMIC;
-		else if (ext == 4)
+
+		else if (modrm_reg == 4)
+
 			*type = INSN_JUMP_DYNAMIC;
-		else if (ext == 5) /*jmpf */
+
+		else if (modrm_reg == 5)
+
+			/* jmpf */
 			*type = INSN_CONTEXT_SWITCH;
 
+		else if (modrm_reg == 6) {
+
+			/* push from mem */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_CONST;
+			op->dest.type = OP_DEST_PUSH;
+		}
+
 		break;
 
 	default:
@@ -179,3 +476,21 @@
 
 	return 0;
 }
+
+void arch_initial_func_cfi_state(struct cfi_state *state)
+{
+	int i;
+
+	for (i = 0; i < CFI_NUM_REGS; i++) {
+		state->regs[i].base = CFI_UNDEFINED;
+		state->regs[i].offset = 0;
+	}
+
+	/* initial CFA (call frame address) */
+	state->cfa.base = CFI_SP;
+	state->cfa.offset = 8;
+
+	/* initial RA (return address) */
+	state->regs[16].base = CFI_CFA;
+	state->regs[16].offset = -8;
+}
diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
similarity index 99%
rename from tools/objtool/arch/x86/insn/inat.h
rename to tools/objtool/arch/x86/include/asm/inat.h
index 125ecd2..02aff08 100644
--- a/tools/objtool/arch/x86/insn/inat.h
+++ b/tools/objtool/arch/x86/include/asm/inat.h
@@ -20,7 +20,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "inat_types.h"
+#include <asm/inat_types.h>
 
 /*
  * Internal bits. Don't use bitmasks directly, because these bits are
diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
similarity index 100%
rename from tools/objtool/arch/x86/insn/inat_types.h
rename to tools/objtool/arch/x86/include/asm/inat_types.h
diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
similarity index 99%
rename from tools/objtool/arch/x86/insn/insn.h
rename to tools/objtool/arch/x86/include/asm/insn.h
index e23578c..b3e32b0 100644
--- a/tools/objtool/arch/x86/insn/insn.h
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -21,7 +21,7 @@
  */
 
 /* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include <asm/inat.h>
 
 struct insn_field {
 	union {
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
new file mode 100644
index 0000000..7dc777a
--- /dev/null
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED		0
+#define ORC_REG_PREV_SP			1
+#define ORC_REG_DX			2
+#define ORC_REG_DI			3
+#define ORC_REG_BP			4
+#define ORC_REG_SP			5
+#define ORC_REG_R10			6
+#define ORC_REG_R13			7
+#define ORC_REG_BP_INDIRECT		8
+#define ORC_REG_SP_INDIRECT		9
+#define ORC_REG_MAX			15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call).  Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct.  They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL			0
+#define ORC_TYPE_REGS			1
+#define ORC_TYPE_REGS_IRET		2
+#define UNWIND_HINT_TYPE_SAVE		3
+#define UNWIND_HINT_TYPE_RESTORE	4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard.  It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder.  It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address.  Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+	s16		sp_offset;
+	s16		bp_offset;
+	unsigned	sp_reg:4;
+	unsigned	bp_reg:4;
+	unsigned	type:2;
+};
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+	u32		ip;
+	s16		sp_offset;
+	u8		sp_reg;
+	u8		type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
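
Purely as an illustration (not part of the patch): for a frame-pointer-style function body, right after 'push %rbp; mov %rsp,%rbp', the generated entry would look roughly like the one below, meaning "the previous SP is at %rbp+16, and the saved %rbp sits 16 bytes below that":

	struct orc_entry frame_pointer_body = {
		.sp_reg    = ORC_REG_BP,	/* prev SP = current %rbp + 16 */
		.sp_offset = 16,
		.bp_reg    = ORC_REG_PREV_SP,	/* saved %rbp lives at prev SP - 16 */
		.bp_offset = -16,
		.type      = ORC_TYPE_CALL,
	};
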
diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/lib/inat.c
similarity index 98%
rename from tools/objtool/arch/x86/insn/inat.c
rename to tools/objtool/arch/x86/lib/inat.c
index e4bf28e..c1f01a8 100644
--- a/tools/objtool/arch/x86/insn/inat.c
+++ b/tools/objtool/arch/x86/lib/inat.c
@@ -18,7 +18,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "insn.h"
+#include <asm/insn.h>
 
 /* Attribute tables are generated from opcode map */
 #include "inat-tables.c"
diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/lib/insn.c
similarity index 99%
rename from tools/objtool/arch/x86/insn/insn.c
rename to tools/objtool/arch/x86/lib/insn.c
index ca983e2..1088eb8 100644
--- a/tools/objtool/arch/x86/insn/insn.c
+++ b/tools/objtool/arch/x86/lib/insn.c
@@ -23,8 +23,8 @@
 #else
 #include <string.h>
 #endif
-#include "inat.h"
-#include "insn.h"
+#include <asm/inat.h>
+#include <asm/insn.h>
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n)	\
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
similarity index 100%
rename from tools/objtool/arch/x86/insn/x86-opcode-map.txt
rename to tools/objtool/arch/x86/lib/x86-opcode-map.txt
diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
similarity index 100%
rename from tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
rename to tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index a688a85..694abc6 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -25,1300 +25,35 @@
  * For more information, see tools/objtool/Documentation/stack-validation.txt.
  */
 
-#include <string.h>
-#include <stdlib.h>
 #include <subcmd/parse-options.h>
-
 #include "builtin.h"
-#include "elf.h"
-#include "special.h"
-#include "arch.h"
-#include "warn.h"
+#include "check.h"
 
-#include <linux/hashtable.h>
+bool no_fp, no_unreachable, retpoline, module;
 
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
-#define STATE_FP_SAVED		0x1
-#define STATE_FP_SETUP		0x2
-#define STATE_FENTRY		0x4
-
-struct instruction {
-	struct list_head list;
-	struct hlist_node hash;
-	struct section *sec;
-	unsigned long offset;
-	unsigned int len, state;
-	unsigned char type;
-	unsigned long immediate;
-	bool alt_group, visited, ignore_alts;
-	struct symbol *call_dest;
-	struct instruction *jump_dest;
-	struct list_head alts;
-	struct symbol *func;
-};
-
-struct alternative {
-	struct list_head list;
-	struct instruction *insn;
-};
-
-struct objtool_file {
-	struct elf *elf;
-	struct list_head insn_list;
-	DECLARE_HASHTABLE(insn_hash, 16);
-	struct section *rodata, *whitelist;
-	bool ignore_unreachables, c_file;
-};
-
-const char *objname;
-static bool nofp;
-
-static struct instruction *find_insn(struct objtool_file *file,
-				     struct section *sec, unsigned long offset)
-{
-	struct instruction *insn;
-
-	hash_for_each_possible(file->insn_hash, insn, hash, offset)
-		if (insn->sec == sec && insn->offset == offset)
-			return insn;
-
-	return NULL;
-}
-
-static struct instruction *next_insn_same_sec(struct objtool_file *file,
-					      struct instruction *insn)
-{
-	struct instruction *next = list_next_entry(insn, list);
-
-	if (&next->list == &file->insn_list || next->sec != insn->sec)
-		return NULL;
-
-	return next;
-}
-
-static bool gcov_enabled(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *sym;
-
-	list_for_each_entry(sec, &file->elf->sections, list)
-		list_for_each_entry(sym, &sec->symbol_list, list)
-			if (!strncmp(sym->name, "__gcov_.", 8))
-				return true;
-
-	return false;
-}
-
-#define for_each_insn(file, insn)					\
-	list_for_each_entry(insn, &file->insn_list, list)
-
-#define func_for_each_insn(file, func, insn)				\
-	for (insn = find_insn(file, func->sec, func->offset);		\
-	     insn && &insn->list != &file->insn_list &&			\
-		insn->sec == func->sec &&				\
-		insn->offset < func->offset + func->len;		\
-	     insn = list_next_entry(insn, list))
-
-#define func_for_each_insn_continue_reverse(file, func, insn)		\
-	for (insn = list_prev_entry(insn, list);			\
-	     &insn->list != &file->insn_list &&				\
-		insn->sec == func->sec && insn->offset >= func->offset;	\
-	     insn = list_prev_entry(insn, list))
-
-#define sec_for_each_insn_from(file, insn)				\
-	for (; insn; insn = next_insn_same_sec(file, insn))
-
-
-/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
-	struct rela *rela;
-	struct instruction *insn;
-
-	/* check for STACK_FRAME_NON_STANDARD */
-	if (file->whitelist && file->whitelist->rela)
-		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
-			if (rela->sym->type == STT_SECTION &&
-			    rela->sym->sec == func->sec &&
-			    rela->addend == func->offset)
-				return true;
-			if (rela->sym->type == STT_FUNC && rela->sym == func)
-				return true;
-		}
-
-	/* check if it has a context switching instruction */
-	func_for_each_insn(file, func, insn)
-		if (insn->type == INSN_CONTEXT_SWITCH)
-			return true;
-
-	return false;
-}
-
-/*
- * This checks to see if the given function is a "noreturn" function.
- *
- * For global functions which are outside the scope of this object file, we
- * have to keep a manual list of them.
- *
- * For local functions, we have to detect them manually by simply looking for
- * the lack of a return instruction.
- *
- * Returns:
- *  -1: error
- *   0: no dead end
- *   1: dead end
- */
-static int __dead_end_function(struct objtool_file *file, struct symbol *func,
-			       int recursion)
-{
-	int i;
-	struct instruction *insn;
-	bool empty = true;
-
-	/*
-	 * Unfortunately these have to be hard coded because the noreturn
-	 * attribute isn't provided in ELF data.
-	 */
-	static const char * const global_noreturns[] = {
-		"__stack_chk_fail",
-		"panic",
-		"do_exit",
-		"do_task_dead",
-		"__module_put_and_exit",
-		"complete_and_exit",
-		"kvm_spurious_fault",
-		"__reiserfs_panic",
-		"lbug_with_loc"
-	};
-
-	if (func->bind == STB_WEAK)
-		return 0;
-
-	if (func->bind == STB_GLOBAL)
-		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
-			if (!strcmp(func->name, global_noreturns[i]))
-				return 1;
-
-	if (!func->sec)
-		return 0;
-
-	func_for_each_insn(file, func, insn) {
-		empty = false;
-
-		if (insn->type == INSN_RETURN)
-			return 0;
-	}
-
-	if (empty)
-		return 0;
-
-	/*
-	 * A function can have a sibling call instead of a return.  In that
-	 * case, the function's dead-end status depends on whether the target
-	 * of the sibling call returns.
-	 */
-	func_for_each_insn(file, func, insn) {
-		if (insn->sec != func->sec ||
-		    insn->offset >= func->offset + func->len)
-			break;
-
-		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
-			struct instruction *dest = insn->jump_dest;
-			struct symbol *dest_func;
-
-			if (!dest)
-				/* sibling call to another file */
-				return 0;
-
-			if (dest->sec != func->sec ||
-			    dest->offset < func->offset ||
-			    dest->offset >= func->offset + func->len) {
-				/* local sibling call */
-				dest_func = find_symbol_by_offset(dest->sec,
-								  dest->offset);
-				if (!dest_func)
-					continue;
-
-				if (recursion == 5) {
-					WARN_FUNC("infinite recursion (objtool bug!)",
-						  dest->sec, dest->offset);
-					return -1;
-				}
-
-				return __dead_end_function(file, dest_func,
-							   recursion + 1);
-			}
-		}
-
-		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
-			/* sibling call */
-			return 0;
-	}
-
-	return 1;
-}
-
-static int dead_end_function(struct objtool_file *file, struct symbol *func)
-{
-	return __dead_end_function(file, func, 0);
-}
-
-/*
- * Call the arch-specific instruction decoder for all the instructions and add
- * them to the global instruction list.
- */
-static int decode_instructions(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	unsigned long offset;
-	struct instruction *insn;
-	int ret;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-
-		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
-			continue;
-
-		for (offset = 0; offset < sec->len; offset += insn->len) {
-			insn = malloc(sizeof(*insn));
-			memset(insn, 0, sizeof(*insn));
-
-			INIT_LIST_HEAD(&insn->alts);
-			insn->sec = sec;
-			insn->offset = offset;
-
-			ret = arch_decode_instruction(file->elf, sec, offset,
-						      sec->len - offset,
-						      &insn->len, &insn->type,
-						      &insn->immediate);
-			if (ret)
-				return ret;
-
-			if (!insn->type || insn->type > INSN_LAST) {
-				WARN_FUNC("invalid instruction type %d",
-					  insn->sec, insn->offset, insn->type);
-				return -1;
-			}
-
-			hash_add(file->insn_hash, &insn->hash, insn->offset);
-			list_add_tail(&insn->list, &file->insn_list);
-		}
-
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			if (!find_insn(file, sec, func->offset)) {
-				WARN("%s(): can't find starting instruction",
-				     func->name);
-				return -1;
-			}
-
-			func_for_each_insn(file, func, insn)
-				if (!insn->func)
-					insn->func = func;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Warnings shouldn't be reported for ignored functions.
- */
-static void add_ignores(struct objtool_file *file)
-{
-	struct instruction *insn;
-	struct section *sec;
-	struct symbol *func;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			if (!ignore_func(file, func))
-				continue;
-
-			func_for_each_insn(file, func, insn)
-				insn->visited = true;
-		}
-	}
-}
-
-/*
- * FIXME: For now, just ignore any alternatives which add retpolines.  This is
- * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
- * But it at least allows objtool to understand the control flow *around* the
- * retpoline.
- */
-static int add_nospec_ignores(struct objtool_file *file)
-{
-	struct section *sec;
-	struct rela *rela;
-	struct instruction *insn;
-
-	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
-	if (!sec)
-		return 0;
-
-	list_for_each_entry(rela, &sec->rela_list, list) {
-		if (rela->sym->type != STT_SECTION) {
-			WARN("unexpected relocation symbol type in %s", sec->name);
-			return -1;
-		}
-
-		insn = find_insn(file, rela->sym->sec, rela->addend);
-		if (!insn) {
-			WARN("bad .discard.nospec entry");
-			return -1;
-		}
-
-		insn->ignore_alts = true;
-	}
-
-	return 0;
-}
-
-/*
- * Find the destination instructions for all jumps.
- */
-static int add_jump_destinations(struct objtool_file *file)
-{
-	struct instruction *insn;
-	struct rela *rela;
-	struct section *dest_sec;
-	unsigned long dest_off;
-
-	for_each_insn(file, insn) {
-		if (insn->type != INSN_JUMP_CONDITIONAL &&
-		    insn->type != INSN_JUMP_UNCONDITIONAL)
-			continue;
-
-		/* skip ignores */
-		if (insn->visited)
-			continue;
-
-		rela = find_rela_by_dest_range(insn->sec, insn->offset,
-					       insn->len);
-		if (!rela) {
-			dest_sec = insn->sec;
-			dest_off = insn->offset + insn->len + insn->immediate;
-		} else if (rela->sym->type == STT_SECTION) {
-			dest_sec = rela->sym->sec;
-			dest_off = rela->addend + 4;
-		} else if (rela->sym->sec->idx) {
-			dest_sec = rela->sym->sec;
-			dest_off = rela->sym->sym.st_value + rela->addend + 4;
-		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
-			/*
-			 * Retpoline jumps are really dynamic jumps in
-			 * disguise, so convert them accordingly.
-			 */
-			insn->type = INSN_JUMP_DYNAMIC;
-			continue;
-		} else {
-			/* sibling call */
-			insn->jump_dest = 0;
-			continue;
-		}
-
-		insn->jump_dest = find_insn(file, dest_sec, dest_off);
-		if (!insn->jump_dest) {
-
-			/*
-			 * This is a special case where an alt instruction
-			 * jumps past the end of the section.  These are
-			 * handled later in handle_group_alt().
-			 */
-			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
-				continue;
-
-			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
-				  insn->sec, insn->offset, dest_sec->name,
-				  dest_off);
-			return -1;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Find the destination instructions for all calls.
- */
-static int add_call_destinations(struct objtool_file *file)
-{
-	struct instruction *insn;
-	unsigned long dest_off;
-	struct rela *rela;
-
-	for_each_insn(file, insn) {
-		if (insn->type != INSN_CALL)
-			continue;
-
-		rela = find_rela_by_dest_range(insn->sec, insn->offset,
-					       insn->len);
-		if (!rela) {
-			dest_off = insn->offset + insn->len + insn->immediate;
-			insn->call_dest = find_symbol_by_offset(insn->sec,
-								dest_off);
-			/*
-			 * FIXME: Thanks to retpolines, it's now considered
-			 * normal for a function to call within itself.  So
-			 * disable this warning for now.
-			 */
-#if 0
-			if (!insn->call_dest) {
-				WARN_FUNC("can't find call dest symbol at offset 0x%lx",
-					  insn->sec, insn->offset, dest_off);
-				return -1;
-			}
-#endif
-		} else if (rela->sym->type == STT_SECTION) {
-			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
-								rela->addend+4);
-			if (!insn->call_dest ||
-			    insn->call_dest->type != STT_FUNC) {
-				WARN_FUNC("can't find call dest symbol at %s+0x%x",
-					  insn->sec, insn->offset,
-					  rela->sym->sec->name,
-					  rela->addend + 4);
-				return -1;
-			}
-		} else
-			insn->call_dest = rela->sym;
-	}
-
-	return 0;
-}
-
-/*
- * The .alternatives section requires some extra special care, over and above
- * what other special sections require:
- *
- * 1. Because alternatives are patched in-place, we need to insert a fake jump
- *    instruction at the end so that validate_branch() skips all the original
- *    replaced instructions when validating the new instruction path.
- *
- * 2. An added wrinkle is that the new instruction length might be zero.  In
- *    that case the old instructions are replaced with noops.  We simulate that
- *    by creating a fake jump as the only new instruction.
- *
- * 3. In some cases, the alternative section includes an instruction which
- *    conditionally jumps to the _end_ of the entry.  We have to modify these
- *    jumps' destinations to point back to .text rather than the end of the
- *    entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- *    which is a "very very small percentage of machines".
- */
-static int handle_group_alt(struct objtool_file *file,
-			    struct special_alt *special_alt,
-			    struct instruction *orig_insn,
-			    struct instruction **new_insn)
-{
-	struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
-	unsigned long dest_off;
-
-	last_orig_insn = NULL;
-	insn = orig_insn;
-	sec_for_each_insn_from(file, insn) {
-		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
-			break;
-
-		if (special_alt->skip_orig)
-			insn->type = INSN_NOP;
-
-		insn->alt_group = true;
-		last_orig_insn = insn;
-	}
-
-	if (!next_insn_same_sec(file, last_orig_insn)) {
-		WARN("%s: don't know how to handle alternatives at end of section",
-		     special_alt->orig_sec->name);
-		return -1;
-	}
-
-	fake_jump = malloc(sizeof(*fake_jump));
-	if (!fake_jump) {
-		WARN("malloc failed");
-		return -1;
-	}
-	memset(fake_jump, 0, sizeof(*fake_jump));
-	INIT_LIST_HEAD(&fake_jump->alts);
-	fake_jump->sec = special_alt->new_sec;
-	fake_jump->offset = -1;
-	fake_jump->type = INSN_JUMP_UNCONDITIONAL;
-	fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
-
-	if (!special_alt->new_len) {
-		*new_insn = fake_jump;
-		return 0;
-	}
-
-	last_new_insn = NULL;
-	insn = *new_insn;
-	sec_for_each_insn_from(file, insn) {
-		if (insn->offset >= special_alt->new_off + special_alt->new_len)
-			break;
-
-		last_new_insn = insn;
-
-		if (insn->type != INSN_JUMP_CONDITIONAL &&
-		    insn->type != INSN_JUMP_UNCONDITIONAL)
-			continue;
-
-		if (!insn->immediate)
-			continue;
-
-		dest_off = insn->offset + insn->len + insn->immediate;
-		if (dest_off == special_alt->new_off + special_alt->new_len)
-			insn->jump_dest = fake_jump;
-
-		if (!insn->jump_dest) {
-			WARN_FUNC("can't find alternative jump destination",
-				  insn->sec, insn->offset);
-			return -1;
-		}
-	}
-
-	if (!last_new_insn) {
-		WARN_FUNC("can't find last new alternative instruction",
-			  special_alt->new_sec, special_alt->new_off);
-		return -1;
-	}
-
-	list_add(&fake_jump->list, &last_new_insn->list);
-
-	return 0;
-}
-
-/*
- * A jump table entry can either convert a nop to a jump or a jump to a nop.
- * If the original instruction is a jump, make the alt entry an effective nop
- * by just skipping the original instruction.
- */
-static int handle_jump_alt(struct objtool_file *file,
-			   struct special_alt *special_alt,
-			   struct instruction *orig_insn,
-			   struct instruction **new_insn)
-{
-	if (orig_insn->type == INSN_NOP)
-		return 0;
-
-	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
-		WARN_FUNC("unsupported instruction at jump label",
-			  orig_insn->sec, orig_insn->offset);
-		return -1;
-	}
-
-	*new_insn = list_next_entry(orig_insn, list);
-	return 0;
-}
-
-/*
- * Read all the special sections which have alternate instructions which can be
- * patched in or redirected to at runtime.  Each instruction having alternate
- * instruction(s) has them added to its insn->alts list, which will be
- * traversed in validate_branch().
- */
-static int add_special_section_alts(struct objtool_file *file)
-{
-	struct list_head special_alts;
-	struct instruction *orig_insn, *new_insn;
-	struct special_alt *special_alt, *tmp;
-	struct alternative *alt;
-	int ret;
-
-	ret = special_get_alts(file->elf, &special_alts);
-	if (ret)
-		return ret;
-
-	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-
-		orig_insn = find_insn(file, special_alt->orig_sec,
-				      special_alt->orig_off);
-		if (!orig_insn) {
-			WARN_FUNC("special: can't find orig instruction",
-				  special_alt->orig_sec, special_alt->orig_off);
-			ret = -1;
-			goto out;
-		}
-
-		/* Ignore retpoline alternatives. */
-		if (orig_insn->ignore_alts)
-			continue;
-
-		new_insn = NULL;
-		if (!special_alt->group || special_alt->new_len) {
-			new_insn = find_insn(file, special_alt->new_sec,
-					     special_alt->new_off);
-			if (!new_insn) {
-				WARN_FUNC("special: can't find new instruction",
-					  special_alt->new_sec,
-					  special_alt->new_off);
-				ret = -1;
-				goto out;
-			}
-		}
-
-		if (special_alt->group) {
-			ret = handle_group_alt(file, special_alt, orig_insn,
-					       &new_insn);
-			if (ret)
-				goto out;
-		} else if (special_alt->jump_or_nop) {
-			ret = handle_jump_alt(file, special_alt, orig_insn,
-					      &new_insn);
-			if (ret)
-				goto out;
-		}
-
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			ret = -1;
-			goto out;
-		}
-
-		alt->insn = new_insn;
-		list_add_tail(&alt->list, &orig_insn->alts);
-
-		list_del(&special_alt->list);
-		free(special_alt);
-	}
-
-out:
-	return ret;
-}
-
-static int add_switch_table(struct objtool_file *file, struct symbol *func,
-			    struct instruction *insn, struct rela *table,
-			    struct rela *next_table)
-{
-	struct rela *rela = table;
-	struct instruction *alt_insn;
-	struct alternative *alt;
-
-	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
-		if (rela == next_table)
-			break;
-
-		if (rela->sym->sec != insn->sec ||
-		    rela->addend <= func->offset ||
-		    rela->addend >= func->offset + func->len)
-			break;
-
-		alt_insn = find_insn(file, insn->sec, rela->addend);
-		if (!alt_insn) {
-			WARN("%s: can't find instruction at %s+0x%x",
-			     file->rodata->rela->name, insn->sec->name,
-			     rela->addend);
-			return -1;
-		}
-
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			return -1;
-		}
-
-		alt->insn = alt_insn;
-		list_add_tail(&alt->list, &insn->alts);
-	}
-
-	return 0;
-}
-
-/*
- * find_switch_table() - Given a dynamic jump, find the switch jump table in
- * .rodata associated with it.
- *
- * There are 3 basic patterns:
- *
- * 1. jmpq *[rodata addr](,%reg,8)
- *
- *    This is the most common case by far.  It jumps to an address in a simple
- *    jump table which is stored in .rodata.
- *
- * 2. jmpq *[rodata addr](%rip)
- *
- *    This is caused by a rare GCC quirk, currently only seen in three driver
- *    functions in the kernel, only with certain obscure non-distro configs.
- *
- *    As part of an optimization, GCC makes a copy of an existing switch jump
- *    table, modifies it, and then hard-codes the jump (albeit with an indirect
- *    jump) to use a single entry in the table.  The rest of the jump table and
- *    some of its jump targets remain as dead code.
- *
- *    In such a case we can just crudely ignore all unreachable instruction
- *    warnings for the entire object file.  Ideally we would just ignore them
- *    for the function, but that would require redesigning the code quite a
- *    bit.  And honestly that's just not worth doing: unreachable instruction
- *    warnings are of questionable value anyway, and this is such a rare issue.
- *
- * 3. mov [rodata addr],%reg1
- *    ... some instructions ...
- *    jmpq *(%reg1,%reg2,8)
- *
- *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
- *    writing, there are 11 occurrences of it in the allmodconfig kernel.
- *
- *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
- *    ensure the same register is used in the mov and jump instructions.
- */
-static struct rela *find_switch_table(struct objtool_file *file,
-				      struct symbol *func,
-				      struct instruction *insn)
-{
-	struct rela *text_rela, *rodata_rela;
-	struct instruction *orig_insn = insn;
-
-	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-	if (text_rela && text_rela->sym == file->rodata->sym) {
-		/* case 1 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend);
-		if (rodata_rela)
-			return rodata_rela;
-
-		/* case 2 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend + 4);
-		if (!rodata_rela)
-			return NULL;
-		file->ignore_unreachables = true;
-		return rodata_rela;
-	}
-
-	/* case 3 */
-	func_for_each_insn_continue_reverse(file, func, insn) {
-		if (insn->type == INSN_JUMP_DYNAMIC)
-			break;
-
-		/* allow small jumps within the range */
-		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
-		    insn->jump_dest &&
-		    (insn->jump_dest->offset <= insn->offset ||
-		     insn->jump_dest->offset > orig_insn->offset))
-		    break;
-
-		/* look for a relocation which references .rodata */
-		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
-						    insn->len);
-		if (!text_rela || text_rela->sym != file->rodata->sym)
-			continue;
-
-		/*
-		 * Make sure the .rodata address isn't associated with a
-		 * symbol.  gcc jump tables are anonymous data.
-		 */
-		if (find_symbol_containing(file->rodata, text_rela->addend))
-			continue;
-
-		return find_rela_by_dest(file->rodata, text_rela->addend);
-	}
-
-	return NULL;
-}
-
-static int add_func_switch_tables(struct objtool_file *file,
-				  struct symbol *func)
-{
-	struct instruction *insn, *prev_jump = NULL;
-	struct rela *rela, *prev_rela = NULL;
-	int ret;
-
-	func_for_each_insn(file, func, insn) {
-		if (insn->type != INSN_JUMP_DYNAMIC)
-			continue;
-
-		rela = find_switch_table(file, func, insn);
-		if (!rela)
-			continue;
-
-		/*
-		 * We found a switch table, but we don't know yet how big it
-		 * is.  Don't add it until we reach the end of the function or
-		 * the beginning of another switch table in the same function.
-		 */
-		if (prev_jump) {
-			ret = add_switch_table(file, func, prev_jump, prev_rela,
-					       rela);
-			if (ret)
-				return ret;
-		}
-
-		prev_jump = insn;
-		prev_rela = rela;
-	}
-
-	if (prev_jump) {
-		ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/*
- * For some switch statements, gcc generates a jump table in the .rodata
- * section which contains a list of addresses within the function to jump to.
- * This finds these jump tables and adds them to the insn->alts lists.
- */
-static int add_switch_table_alts(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	int ret;
-
-	if (!file->rodata || !file->rodata->rela)
-		return 0;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			ret = add_func_switch_tables(file, func);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int decode_sections(struct objtool_file *file)
-{
-	int ret;
-
-	ret = decode_instructions(file);
-	if (ret)
-		return ret;
-
-	add_ignores(file);
-
-	ret = add_nospec_ignores(file);
-	if (ret)
-		return ret;
-
-	ret = add_jump_destinations(file);
-	if (ret)
-		return ret;
-
-	ret = add_call_destinations(file);
-	if (ret)
-		return ret;
-
-	ret = add_special_section_alts(file);
-	if (ret)
-		return ret;
-
-	ret = add_switch_table_alts(file);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static bool is_fentry_call(struct instruction *insn)
-{
-	if (insn->type == INSN_CALL &&
-	    insn->call_dest->type == STT_NOTYPE &&
-	    !strcmp(insn->call_dest->name, "__fentry__"))
-		return true;
-
-	return false;
-}
-
-static bool has_modified_stack_frame(struct instruction *insn)
-{
-	return (insn->state & STATE_FP_SAVED) ||
-	       (insn->state & STATE_FP_SETUP);
-}
-
-static bool has_valid_stack_frame(struct instruction *insn)
-{
-	return (insn->state & STATE_FP_SAVED) &&
-	       (insn->state & STATE_FP_SETUP);
-}
-
-static unsigned int frame_state(unsigned long state)
-{
-	return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
-}
-
-/*
- * Follow the branch starting at the given instruction, and recursively follow
- * any other branches (jumps).  Meanwhile, track the frame pointer state at
- * each instruction and validate all the rules described in
- * tools/objtool/Documentation/stack-validation.txt.
- */
-static int validate_branch(struct objtool_file *file,
-			   struct instruction *first, unsigned char first_state)
-{
-	struct alternative *alt;
-	struct instruction *insn;
-	struct section *sec;
-	struct symbol *func = NULL;
-	unsigned char state;
-	int ret;
-
-	insn = first;
-	sec = insn->sec;
-	state = first_state;
-
-	if (insn->alt_group && list_empty(&insn->alts)) {
-		WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
-			  sec, insn->offset);
-		return 1;
-	}
-
-	while (1) {
-		if (file->c_file && insn->func) {
-			if (func && func != insn->func) {
-				WARN("%s() falls through to next function %s()",
-				     func->name, insn->func->name);
-				return 1;
-			}
-
-			func = insn->func;
-		}
-
-		if (insn->visited) {
-			if (frame_state(insn->state) != frame_state(state)) {
-				WARN_FUNC("frame pointer state mismatch",
-					  sec, insn->offset);
-				return 1;
-			}
-
-			return 0;
-		}
-
-		insn->visited = true;
-		insn->state = state;
-
-		list_for_each_entry(alt, &insn->alts, list) {
-			ret = validate_branch(file, alt->insn, state);
-			if (ret)
-				return 1;
-		}
-
-		switch (insn->type) {
-
-		case INSN_FP_SAVE:
-			if (!nofp) {
-				if (state & STATE_FP_SAVED) {
-					WARN_FUNC("duplicate frame pointer save",
-						  sec, insn->offset);
-					return 1;
-				}
-				state |= STATE_FP_SAVED;
-			}
-			break;
-
-		case INSN_FP_SETUP:
-			if (!nofp) {
-				if (state & STATE_FP_SETUP) {
-					WARN_FUNC("duplicate frame pointer setup",
-						  sec, insn->offset);
-					return 1;
-				}
-				state |= STATE_FP_SETUP;
-			}
-			break;
-
-		case INSN_FP_RESTORE:
-			if (!nofp) {
-				if (has_valid_stack_frame(insn))
-					state &= ~STATE_FP_SETUP;
-
-				state &= ~STATE_FP_SAVED;
-			}
-			break;
-
-		case INSN_RETURN:
-			if (!nofp && has_modified_stack_frame(insn)) {
-				WARN_FUNC("return without frame pointer restore",
-					  sec, insn->offset);
-				return 1;
-			}
-			return 0;
-
-		case INSN_CALL:
-			if (is_fentry_call(insn)) {
-				state |= STATE_FENTRY;
-				break;
-			}
-
-			ret = dead_end_function(file, insn->call_dest);
-			if (ret == 1)
-				return 0;
-			if (ret == -1)
-				return 1;
-
-			/* fallthrough */
-		case INSN_CALL_DYNAMIC:
-			if (!nofp && !has_valid_stack_frame(insn)) {
-				WARN_FUNC("call without frame pointer save/setup",
-					  sec, insn->offset);
-				return 1;
-			}
-			break;
-
-		case INSN_JUMP_CONDITIONAL:
-		case INSN_JUMP_UNCONDITIONAL:
-			if (insn->jump_dest) {
-				ret = validate_branch(file, insn->jump_dest,
-						      state);
-				if (ret)
-					return 1;
-			} else if (has_modified_stack_frame(insn)) {
-				WARN_FUNC("sibling call from callable instruction with changed frame pointer",
-					  sec, insn->offset);
-				return 1;
-			} /* else it's a sibling call */
-
-			if (insn->type == INSN_JUMP_UNCONDITIONAL)
-				return 0;
-
-			break;
-
-		case INSN_JUMP_DYNAMIC:
-			if (list_empty(&insn->alts) &&
-			    has_modified_stack_frame(insn)) {
-				WARN_FUNC("sibling call from callable instruction with changed frame pointer",
-					  sec, insn->offset);
-				return 1;
-			}
-
-			return 0;
-
-		case INSN_BUG:
-			return 0;
-
-		default:
-			break;
-		}
-
-		insn = next_insn_same_sec(file, insn);
-		if (!insn) {
-			WARN("%s: unexpected end of section", sec->name);
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-static bool is_kasan_insn(struct instruction *insn)
-{
-	return (insn->type == INSN_CALL &&
-		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
-}
-
-static bool is_ubsan_insn(struct instruction *insn)
-{
-	return (insn->type == INSN_CALL &&
-		!strcmp(insn->call_dest->name,
-			"__ubsan_handle_builtin_unreachable"));
-}
-
-static bool ignore_unreachable_insn(struct symbol *func,
-				    struct instruction *insn)
-{
-	int i;
-
-	if (insn->type == INSN_NOP)
-		return true;
-
-	/*
-	 * Check if this (or a subsequent) instruction is related to
-	 * CONFIG_UBSAN or CONFIG_KASAN.
-	 *
-	 * End the search at 5 instructions to avoid going into the weeds.
-	 */
-	for (i = 0; i < 5; i++) {
-
-		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
-			return true;
-
-		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
-			insn = insn->jump_dest;
-			continue;
-		}
-
-		if (insn->offset + insn->len >= func->offset + func->len)
-			break;
-		insn = list_next_entry(insn, list);
-	}
-
-	return false;
-}
-
-static int validate_functions(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	struct instruction *insn;
-	int ret, warnings = 0;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			insn = find_insn(file, sec, func->offset);
-			if (!insn)
-				continue;
-
-			ret = validate_branch(file, insn, 0);
-			warnings += ret;
-		}
-	}
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			func_for_each_insn(file, func, insn) {
-				if (insn->visited)
-					continue;
-
-				insn->visited = true;
-
-				if (file->ignore_unreachables || warnings ||
-				    ignore_unreachable_insn(func, insn))
-					continue;
-
-				/*
-				 * gcov produces a lot of unreachable
-				 * instructions.  If we get an unreachable
-				 * warning and the file has gcov enabled, just
-				 * ignore it, and all other such warnings for
-				 * the file.
-				 */
-				if (!file->ignore_unreachables &&
-				    gcov_enabled(file)) {
-					file->ignore_unreachables = true;
-					continue;
-				}
-
-				WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
-				warnings++;
-			}
-		}
-	}
-
-	return warnings;
-}
-
-static int validate_uncallable_instructions(struct objtool_file *file)
-{
-	struct instruction *insn;
-	int warnings = 0;
-
-	for_each_insn(file, insn) {
-		if (!insn->visited && insn->type == INSN_RETURN) {
-
-			/*
-			 * Don't warn about call instructions in unvisited
-			 * retpoline alternatives.
-			 */
-			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
-				continue;
-
-			WARN_FUNC("return instruction outside of a callable function",
-				  insn->sec, insn->offset);
-			warnings++;
-		}
-	}
-
-	return warnings;
-}
-
-static void cleanup(struct objtool_file *file)
-{
-	struct instruction *insn, *tmpinsn;
-	struct alternative *alt, *tmpalt;
-
-	list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
-		list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
-			list_del(&alt->list);
-			free(alt);
-		}
-		list_del(&insn->list);
-		hash_del(&insn->hash);
-		free(insn);
-	}
-	elf_close(file->elf);
-}
-
-const char * const check_usage[] = {
+static const char * const check_usage[] = {
 	"objtool check [<options>] file.o",
 	NULL,
 };
 
+const struct option check_options[] = {
+	OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
+	OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
+	OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+	OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+	OPT_END(),
+};
+
 int cmd_check(int argc, const char **argv)
 {
-	struct objtool_file file;
-	int ret, warnings = 0;
+	const char *objname;
 
-	const struct option options[] = {
-		OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
-		OPT_END(),
-	};
-
-	argc = parse_options(argc, argv, options, check_usage, 0);
+	argc = parse_options(argc, argv, check_options, check_usage, 0);
 
 	if (argc != 1)
-		usage_with_options(check_usage, options);
+		usage_with_options(check_usage, check_options);
 
 	objname = argv[0];
 
-	file.elf = elf_open(objname);
-	if (!file.elf) {
-		fprintf(stderr, "error reading elf file %s\n", objname);
-		return 1;
-	}
-
-	INIT_LIST_HEAD(&file.insn_list);
-	hash_init(file.insn_hash);
-	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
-	file.rodata = find_section_by_name(file.elf, ".rodata");
-	file.ignore_unreachables = false;
-	file.c_file = find_section_by_name(file.elf, ".comment");
-
-	ret = decode_sections(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-	ret = validate_functions(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-	ret = validate_uncallable_instructions(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-out:
-	cleanup(&file);
-
-	/* ignore warnings for now until we get all the code cleaned up */
-	if (ret || warnings)
-		return 0;
-	return 0;
+	return check(objname, false);
 }
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
new file mode 100644
index 0000000..77ea2b9
--- /dev/null
+++ b/tools/objtool/builtin-orc.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * objtool orc:
+ *
+ * This command analyzes a .o file and adds .orc_unwind and .orc_unwind_ip
+ * sections to it, which is used by the in-kernel ORC unwinder.
+ *
+ * This command is a superset of "objtool check".
+ */
+
+#include <string.h>
+#include "builtin.h"
+#include "check.h"
+
+
+static const char *orc_usage[] = {
+	"objtool orc generate [<options>] file.o",
+	"objtool orc dump file.o",
+	NULL,
+};
+
+int cmd_orc(int argc, const char **argv)
+{
+	const char *objname;
+
+	argc--; argv++;
+	if (argc <= 0)
+		usage_with_options(orc_usage, check_options);
+
+	if (!strncmp(argv[0], "gen", 3)) {
+		argc = parse_options(argc, argv, check_options, orc_usage, 0);
+		if (argc != 1)
+			usage_with_options(orc_usage, check_options);
+
+		objname = argv[0];
+
+		return check(objname, true);
+	}
+
+	if (!strcmp(argv[0], "dump")) {
+		if (argc != 2)
+			usage_with_options(orc_usage, check_options);
+
+		objname = argv[1];
+
+		return orc_dump(objname);
+	}
+
+	usage_with_options(orc_usage, check_options);
+
+	return 0;
+}
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 34d2ba7..28ff40e 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -17,6 +17,12 @@
 #ifndef _BUILTIN_H
 #define _BUILTIN_H
 
+#include <subcmd/parse-options.h>
+
+extern const struct option check_options[];
+extern bool no_fp, no_unreachable, retpoline, module;
+
 extern int cmd_check(int argc, const char **argv);
+extern int cmd_orc(int argc, const char **argv);
 
 #endif /* _BUILTIN_H */
diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h
new file mode 100644
index 0000000..2fe883c
--- /dev/null
+++ b/tools/objtool/cfi.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _OBJTOOL_CFI_H
+#define _OBJTOOL_CFI_H
+
+#define CFI_UNDEFINED		-1
+#define CFI_CFA			-2
+#define CFI_SP_INDIRECT		-3
+#define CFI_BP_INDIRECT		-4
+
+#define CFI_AX			0
+#define CFI_DX			1
+#define CFI_CX			2
+#define CFI_BX			3
+#define CFI_SI			4
+#define CFI_DI			5
+#define CFI_BP			6
+#define CFI_SP			7
+#define CFI_R8			8
+#define CFI_R9			9
+#define CFI_R10			10
+#define CFI_R11			11
+#define CFI_R12			12
+#define CFI_R13			13
+#define CFI_R14			14
+#define CFI_R15			15
+#define CFI_RA			16
+#define CFI_NUM_REGS		17
+
+struct cfi_reg {
+	int base;
+	int offset;
+};
+
+struct cfi_state {
+	struct cfi_reg cfa;
+	struct cfi_reg regs[CFI_NUM_REGS];
+};
+
+#endif /* _OBJTOOL_CFI_H */
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
new file mode 100644
index 0000000..e128d1c
--- /dev/null
+++ b/tools/objtool/check.c
@@ -0,0 +1,2209 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "builtin.h"
+#include "check.h"
+#include "elf.h"
+#include "special.h"
+#include "arch.h"
+#include "warn.h"
+
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+
+struct alternative {
+	struct list_head list;
+	struct instruction *insn;
+};
+
+const char *objname;
+struct cfi_state initial_func_cfi;
+
+struct instruction *find_insn(struct objtool_file *file,
+			      struct section *sec, unsigned long offset)
+{
+	struct instruction *insn;
+
+	hash_for_each_possible(file->insn_hash, insn, hash, offset)
+		if (insn->sec == sec && insn->offset == offset)
+			return insn;
+
+	return NULL;
+}
+
+static struct instruction *next_insn_same_sec(struct objtool_file *file,
+					      struct instruction *insn)
+{
+	struct instruction *next = list_next_entry(insn, list);
+
+	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
+		return NULL;
+
+	return next;
+}
+
+static struct instruction *next_insn_same_func(struct objtool_file *file,
+					       struct instruction *insn)
+{
+	struct instruction *next = list_next_entry(insn, list);
+	struct symbol *func = insn->func;
+
+	if (!func)
+		return NULL;
+
+	if (&next->list != &file->insn_list && next->func == func)
+		return next;
+
+	/* Check if we're already in the subfunction: */
+	if (func == func->cfunc)
+		return NULL;
+
+	/* Move to the subfunction: */
+	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
+}
+
+#define func_for_each_insn_all(file, func, insn)			\
+	for (insn = find_insn(file, func->sec, func->offset);		\
+	     insn;							\
+	     insn = next_insn_same_func(file, insn))
+
+#define func_for_each_insn(file, func, insn)				\
+	for (insn = find_insn(file, func->sec, func->offset);		\
+	     insn && &insn->list != &file->insn_list &&			\
+		insn->sec == func->sec &&				\
+		insn->offset < func->offset + func->len;		\
+	     insn = list_next_entry(insn, list))
+
+#define func_for_each_insn_continue_reverse(file, func, insn)		\
+	for (insn = list_prev_entry(insn, list);			\
+	     &insn->list != &file->insn_list &&				\
+		insn->sec == func->sec && insn->offset >= func->offset;	\
+	     insn = list_prev_entry(insn, list))
+
+#define sec_for_each_insn_from(file, insn)				\
+	for (; insn; insn = next_insn_same_sec(file, insn))
+
+#define sec_for_each_insn_continue(file, insn)				\
+	for (insn = next_insn_same_sec(file, insn); insn;		\
+	     insn = next_insn_same_sec(file, insn))
+
+/*
+ * Check if the function has been manually whitelisted with the
+ * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
+ * due to its use of a context switching instruction.
+ */
+static bool ignore_func(struct objtool_file *file, struct symbol *func)
+{
+	struct rela *rela;
+
+	/* check for STACK_FRAME_NON_STANDARD */
+	if (file->whitelist && file->whitelist->rela)
+		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+			if (rela->sym->type == STT_SECTION &&
+			    rela->sym->sec == func->sec &&
+			    rela->addend == func->offset)
+				return true;
+			if (rela->sym->type == STT_FUNC && rela->sym == func)
+				return true;
+		}
+
+	return false;
+}
+
+/*
+ * This checks to see if the given function is a "noreturn" function.
+ *
+ * For global functions which are outside the scope of this object file, we
+ * have to keep a manual list of them.
+ *
+ * For local functions, we have to detect them manually by simply looking for
+ * the lack of a return instruction.
+ *
+ * Returns:
+ *  -1: error
+ *   0: no dead end
+ *   1: dead end
+ */
+static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+			       int recursion)
+{
+	int i;
+	struct instruction *insn;
+	bool empty = true;
+
+	/*
+	 * Unfortunately these have to be hard coded because the noreturn
+	 * attribute isn't provided in ELF data.
+	 */
+	static const char * const global_noreturns[] = {
+		"__stack_chk_fail",
+		"panic",
+		"do_exit",
+		"do_task_dead",
+		"__module_put_and_exit",
+		"complete_and_exit",
+		"kvm_spurious_fault",
+		"__reiserfs_panic",
+		"lbug_with_loc",
+		"fortify_panic",
+	};
+
+	if (func->bind == STB_WEAK)
+		return 0;
+
+	if (func->bind == STB_GLOBAL)
+		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
+			if (!strcmp(func->name, global_noreturns[i]))
+				return 1;
+
+	if (!func->len)
+		return 0;
+
+	insn = find_insn(file, func->sec, func->offset);
+	if (!insn->func)
+		return 0;
+
+	func_for_each_insn_all(file, func, insn) {
+		empty = false;
+
+		if (insn->type == INSN_RETURN)
+			return 0;
+	}
+
+	if (empty)
+		return 0;
+
+	/*
+	 * A function can have a sibling call instead of a return.  In that
+	 * case, the function's dead-end status depends on whether the target
+	 * of the sibling call returns.
+	 */
+	func_for_each_insn_all(file, func, insn) {
+		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+			struct instruction *dest = insn->jump_dest;
+
+			if (!dest)
+				/* sibling call to another file */
+				return 0;
+
+			if (dest->func && dest->func->pfunc != insn->func->pfunc) {
+
+				/* local sibling call */
+				if (recursion == 5) {
+					/*
+					 * Infinite recursion: two functions
+					 * have sibling calls to each other.
+					 * This is a very rare case.  It means
+					 * they aren't dead ends.
+					 */
+					return 0;
+				}
+
+				return __dead_end_function(file, dest->func,
+							   recursion + 1);
+			}
+		}
+
+		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
+			/* sibling call */
+			return 0;
+	}
+
+	return 1;
+}
+
+static int dead_end_function(struct objtool_file *file, struct symbol *func)
+{
+	return __dead_end_function(file, func, 0);
+}
+
+static void clear_insn_state(struct insn_state *state)
+{
+	int i;
+
+	memset(state, 0, sizeof(*state));
+	state->cfa.base = CFI_UNDEFINED;
+	for (i = 0; i < CFI_NUM_REGS; i++) {
+		state->regs[i].base = CFI_UNDEFINED;
+		state->vals[i].base = CFI_UNDEFINED;
+	}
+	state->drap_reg = CFI_UNDEFINED;
+	state->drap_offset = -1;
+}
+
+/*
+ * Call the arch-specific instruction decoder for all the instructions and add
+ * them to the global instruction list.
+ */
+static int decode_instructions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	unsigned long offset;
+	struct instruction *insn;
+	int ret;
+
+	for_each_sec(file, sec) {
+
+		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+			continue;
+
+		if (strcmp(sec->name, ".altinstr_replacement") &&
+		    strcmp(sec->name, ".altinstr_aux") &&
+		    strncmp(sec->name, ".discard.", 9))
+			sec->text = true;
+
+		for (offset = 0; offset < sec->len; offset += insn->len) {
+			insn = malloc(sizeof(*insn));
+			if (!insn) {
+				WARN("malloc failed");
+				return -1;
+			}
+			memset(insn, 0, sizeof(*insn));
+			INIT_LIST_HEAD(&insn->alts);
+			clear_insn_state(&insn->state);
+
+			insn->sec = sec;
+			insn->offset = offset;
+
+			ret = arch_decode_instruction(file->elf, sec, offset,
+						      sec->len - offset,
+						      &insn->len, &insn->type,
+						      &insn->immediate,
+						      &insn->stack_op);
+			if (ret)
+				goto err;
+
+			if (!insn->type || insn->type > INSN_LAST) {
+				WARN_FUNC("invalid instruction type %d",
+					  insn->sec, insn->offset, insn->type);
+				ret = -1;
+				goto err;
+			}
+
+			hash_add(file->insn_hash, &insn->hash, insn->offset);
+			list_add_tail(&insn->list, &file->insn_list);
+		}
+
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!find_insn(file, sec, func->offset)) {
+				WARN("%s(): can't find starting instruction",
+				     func->name);
+				return -1;
+			}
+
+			func_for_each_insn(file, func, insn)
+				if (!insn->func)
+					insn->func = func;
+		}
+	}
+
+	return 0;
+
+err:
+	free(insn);
+	return ret;
+}
+
+/*
+ * Mark "ud2" instructions and manually annotated dead ends.
+ */
+static int add_dead_ends(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+	bool found;
+
+	/*
+	 * By default, "ud2" is a dead end unless otherwise annotated, because
+	 * GCC 7 inserts it for certain divide-by-zero cases.
+	 */
+	for_each_insn(file, insn)
+		if (insn->type == INSN_BUG)
+			insn->dead_end = true;
+
+	/*
+	 * Check for manually annotated dead ends.
+	 */
+	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+	if (!sec)
+		goto reachable;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (insn)
+			insn = list_prev_entry(insn, list);
+		else if (rela->addend == rela->sym->sec->len) {
+			found = false;
+			list_for_each_entry_reverse(insn, &file->insn_list, list) {
+				if (insn->sec == rela->sym->sec) {
+					found = true;
+					break;
+				}
+			}
+
+			if (!found) {
+				WARN("can't find unreachable insn at %s+0x%x",
+				     rela->sym->sec->name, rela->addend);
+				return -1;
+			}
+		} else {
+			WARN("can't find unreachable insn at %s+0x%x",
+			     rela->sym->sec->name, rela->addend);
+			return -1;
+		}
+
+		insn->dead_end = true;
+	}
+
+reachable:
+	/*
+	 * These manually annotated reachable checks are needed for GCC 4.4,
+	 * where the Linux unreachable() macro isn't supported.  In that case
+	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
+	 * not a dead end.
+	 */
+	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (insn)
+			insn = list_prev_entry(insn, list);
+		else if (rela->addend == rela->sym->sec->len) {
+			found = false;
+			list_for_each_entry_reverse(insn, &file->insn_list, list) {
+				if (insn->sec == rela->sym->sec) {
+					found = true;
+					break;
+				}
+			}
+
+			if (!found) {
+				WARN("can't find reachable insn at %s+0x%x",
+				     rela->sym->sec->name, rela->addend);
+				return -1;
+			}
+		} else {
+			WARN("can't find reachable insn at %s+0x%x",
+			     rela->sym->sec->name, rela->addend);
+			return -1;
+		}
+
+		insn->dead_end = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Warnings shouldn't be reported for ignored functions.
+ */
+static void add_ignores(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct section *sec;
+	struct symbol *func;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!ignore_func(file, func))
+				continue;
+
+			func_for_each_insn_all(file, func, insn)
+				insn->ignore = true;
+		}
+	}
+}
+
+/*
+ * FIXME: For now, just ignore any alternatives which add retpolines.  This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.nospec entry");
+			return -1;
+		}
+
+		insn->ignore_alts = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Find the destination instructions for all jumps.
+ */
+static int add_jump_destinations(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct rela *rela;
+	struct section *dest_sec;
+	unsigned long dest_off;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_JUMP_CONDITIONAL &&
+		    insn->type != INSN_JUMP_UNCONDITIONAL)
+			continue;
+
+		if (insn->ignore)
+			continue;
+
+		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+					       insn->len);
+		if (!rela) {
+			dest_sec = insn->sec;
+			dest_off = insn->offset + insn->len + insn->immediate;
+		} else if (rela->sym->type == STT_SECTION) {
+			dest_sec = rela->sym->sec;
+			dest_off = rela->addend + 4;
+		} else if (rela->sym->sec->idx) {
+			dest_sec = rela->sym->sec;
+			dest_off = rela->sym->sym.st_value + rela->addend + 4;
+		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+			/*
+			 * Retpoline jumps are really dynamic jumps in
+			 * disguise, so convert them accordingly.
+			 */
+			insn->type = INSN_JUMP_DYNAMIC;
+			insn->retpoline_safe = true;
+			continue;
+		} else {
+			/* sibling call */
+			insn->jump_dest = 0;
+			continue;
+		}
+
+		insn->jump_dest = find_insn(file, dest_sec, dest_off);
+		if (!insn->jump_dest) {
+
+			/*
+			 * This is a special case where an alt instruction
+			 * jumps past the end of the section.  These are
+			 * handled later in handle_group_alt().
+			 */
+			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+				continue;
+
+			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
+				  insn->sec, insn->offset, dest_sec->name,
+				  dest_off);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find the destination instructions for all calls.
+ */
+static int add_call_destinations(struct objtool_file *file)
+{
+	struct instruction *insn;
+	unsigned long dest_off;
+	struct rela *rela;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_CALL)
+			continue;
+
+		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+					       insn->len);
+		if (!rela) {
+			dest_off = insn->offset + insn->len + insn->immediate;
+			insn->call_dest = find_symbol_by_offset(insn->sec,
+								dest_off);
+
+			if (!insn->call_dest && !insn->ignore) {
+				WARN_FUNC("unsupported intra-function call",
+					  insn->sec, insn->offset);
+				if (retpoline)
+					WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+				return -1;
+			}
+
+		} else if (rela->sym->type == STT_SECTION) {
+			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
+								rela->addend+4);
+			if (!insn->call_dest ||
+			    insn->call_dest->type != STT_FUNC) {
+				WARN_FUNC("can't find call dest symbol at %s+0x%x",
+					  insn->sec, insn->offset,
+					  rela->sym->sec->name,
+					  rela->addend + 4);
+				return -1;
+			}
+		} else
+			insn->call_dest = rela->sym;
+	}
+
+	return 0;
+}
+
+/*
+ * The .alternatives section requires some extra special care, over and above
+ * what other special sections require:
+ *
+ * 1. Because alternatives are patched in-place, we need to insert a fake jump
+ *    instruction at the end so that validate_branch() skips all the original
+ *    replaced instructions when validating the new instruction path.
+ *
+ * 2. An added wrinkle is that the new instruction length might be zero.  In
+ *    that case the old instructions are replaced with noops.  We simulate that
+ *    by creating a fake jump as the only new instruction.
+ *
+ * 3. In some cases, the alternative section includes an instruction which
+ *    conditionally jumps to the _end_ of the entry.  We have to modify these
+ *    jumps' destinations to point back to .text rather than the end of the
+ *    entry in .altinstr_replacement.
+ *
+ * 4. It has been requested that we don't validate the !POPCNT feature path
+ *    which is a "very very small percentage of machines".
+ */
+static int handle_group_alt(struct objtool_file *file,
+			    struct special_alt *special_alt,
+			    struct instruction *orig_insn,
+			    struct instruction **new_insn)
+{
+	struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
+	unsigned long dest_off;
+
+	last_orig_insn = NULL;
+	insn = orig_insn;
+	sec_for_each_insn_from(file, insn) {
+		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
+			break;
+
+		if (special_alt->skip_orig)
+			insn->type = INSN_NOP;
+
+		insn->alt_group = true;
+		last_orig_insn = insn;
+	}
+
+	if (next_insn_same_sec(file, last_orig_insn)) {
+		fake_jump = malloc(sizeof(*fake_jump));
+		if (!fake_jump) {
+			WARN("malloc failed");
+			return -1;
+		}
+		memset(fake_jump, 0, sizeof(*fake_jump));
+		INIT_LIST_HEAD(&fake_jump->alts);
+		clear_insn_state(&fake_jump->state);
+
+		fake_jump->sec = special_alt->new_sec;
+		fake_jump->offset = -1;
+		fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+		fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+		fake_jump->ignore = true;
+	}
+
+	if (!special_alt->new_len) {
+		if (!fake_jump) {
+			WARN("%s: empty alternative at end of section",
+			     special_alt->orig_sec->name);
+			return -1;
+		}
+
+		*new_insn = fake_jump;
+		return 0;
+	}
+
+	last_new_insn = NULL;
+	insn = *new_insn;
+	sec_for_each_insn_from(file, insn) {
+		if (insn->offset >= special_alt->new_off + special_alt->new_len)
+			break;
+
+		last_new_insn = insn;
+
+		insn->ignore = orig_insn->ignore_alts;
+
+		if (insn->type != INSN_JUMP_CONDITIONAL &&
+		    insn->type != INSN_JUMP_UNCONDITIONAL)
+			continue;
+
+		if (!insn->immediate)
+			continue;
+
+		dest_off = insn->offset + insn->len + insn->immediate;
+		if (dest_off == special_alt->new_off + special_alt->new_len) {
+			if (!fake_jump) {
+				WARN("%s: alternative jump to end of section",
+				     special_alt->orig_sec->name);
+				return -1;
+			}
+			insn->jump_dest = fake_jump;
+		}
+
+		if (!insn->jump_dest) {
+			WARN_FUNC("can't find alternative jump destination",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+	}
+
+	if (!last_new_insn) {
+		WARN_FUNC("can't find last new alternative instruction",
+			  special_alt->new_sec, special_alt->new_off);
+		return -1;
+	}
+
+	if (fake_jump)
+		list_add(&fake_jump->list, &last_new_insn->list);
+
+	return 0;
+}
+
+/*
+ * A jump table entry can either convert a nop to a jump or a jump to a nop.
+ * If the original instruction is a jump, make the alt entry an effective nop
+ * by just skipping the original instruction.
+ */
+static int handle_jump_alt(struct objtool_file *file,
+			   struct special_alt *special_alt,
+			   struct instruction *orig_insn,
+			   struct instruction **new_insn)
+{
+	if (orig_insn->type == INSN_NOP)
+		return 0;
+
+	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
+		WARN_FUNC("unsupported instruction at jump label",
+			  orig_insn->sec, orig_insn->offset);
+		return -1;
+	}
+
+	*new_insn = list_next_entry(orig_insn, list);
+	return 0;
+}
+
+/*
+ * Read all the special sections which have alternate instructions which can be
+ * patched in or redirected to at runtime.  Each instruction having alternate
+ * instruction(s) has them added to its insn->alts list, which will be
+ * traversed in validate_branch().
+ */
+static int add_special_section_alts(struct objtool_file *file)
+{
+	struct list_head special_alts;
+	struct instruction *orig_insn, *new_insn;
+	struct special_alt *special_alt, *tmp;
+	struct alternative *alt;
+	int ret;
+
+	ret = special_get_alts(file->elf, &special_alts);
+	if (ret)
+		return ret;
+
+	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
+
+		orig_insn = find_insn(file, special_alt->orig_sec,
+				      special_alt->orig_off);
+		if (!orig_insn) {
+			WARN_FUNC("special: can't find orig instruction",
+				  special_alt->orig_sec, special_alt->orig_off);
+			ret = -1;
+			goto out;
+		}
+
+		new_insn = NULL;
+		if (!special_alt->group || special_alt->new_len) {
+			new_insn = find_insn(file, special_alt->new_sec,
+					     special_alt->new_off);
+			if (!new_insn) {
+				WARN_FUNC("special: can't find new instruction",
+					  special_alt->new_sec,
+					  special_alt->new_off);
+				ret = -1;
+				goto out;
+			}
+		}
+
+		if (special_alt->group) {
+			ret = handle_group_alt(file, special_alt, orig_insn,
+					       &new_insn);
+			if (ret)
+				goto out;
+		} else if (special_alt->jump_or_nop) {
+			ret = handle_jump_alt(file, special_alt, orig_insn,
+					      &new_insn);
+			if (ret)
+				goto out;
+		}
+
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			ret = -1;
+			goto out;
+		}
+
+		alt->insn = new_insn;
+		list_add_tail(&alt->list, &orig_insn->alts);
+
+		list_del(&special_alt->list);
+		free(special_alt);
+	}
+
+out:
+	return ret;
+}
+
+static int add_switch_table(struct objtool_file *file, struct instruction *insn,
+			    struct rela *table, struct rela *next_table)
+{
+	struct rela *rela = table;
+	struct instruction *alt_insn;
+	struct alternative *alt;
+	struct symbol *pfunc = insn->func->pfunc;
+	unsigned int prev_offset = 0;
+
+	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+		if (rela == next_table)
+			break;
+
+		/* Make sure the switch table entries are consecutive: */
+		if (prev_offset && rela->offset != prev_offset + 8)
+			break;
+
+		/* Detect function pointers from contiguous objects: */
+		if (rela->sym->sec == pfunc->sec &&
+		    rela->addend == pfunc->offset)
+			break;
+
+		alt_insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!alt_insn)
+			break;
+
+		/* Make sure the jmp dest is in the function or subfunction: */
+		if (alt_insn->func->pfunc != pfunc)
+			break;
+
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			return -1;
+		}
+
+		alt->insn = alt_insn;
+		list_add_tail(&alt->list, &insn->alts);
+		prev_offset = rela->offset;
+	}
+
+	if (!prev_offset) {
+		WARN_FUNC("can't find switch jump table",
+			  insn->sec, insn->offset);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * find_switch_table() - Given a dynamic jump, find the switch jump table in
+ * .rodata associated with it.
+ *
+ * There are 3 basic patterns:
+ *
+ * 1. jmpq *[rodata addr](,%reg,8)
+ *
+ *    This is the most common case by far.  It jumps to an address in a simple
+ *    jump table which is stored in .rodata.
+ *
+ * 2. jmpq *[rodata addr](%rip)
+ *
+ *    This is caused by a rare GCC quirk, currently only seen in three driver
+ *    functions in the kernel, only with certain obscure non-distro configs.
+ *
+ *    As part of an optimization, GCC makes a copy of an existing switch jump
+ *    table, modifies it, and then hard-codes the jump (albeit with an indirect
+ *    jump) to use a single entry in the table.  The rest of the jump table and
+ *    some of its jump targets remain as dead code.
+ *
+ *    In such a case we can just crudely ignore all unreachable instruction
+ *    warnings for the entire object file.  Ideally we would just ignore them
+ *    for the function, but that would require redesigning the code quite a
+ *    bit.  And honestly that's just not worth doing: unreachable instruction
+ *    warnings are of questionable value anyway, and this is such a rare issue.
+ *
+ * 3. mov [rodata addr],%reg1
+ *    ... some instructions ...
+ *    jmpq *(%reg1,%reg2,8)
+ *
+ *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
+ *    writing, there are 11 occurrences of it in the allmodconfig kernel.
+ *
+ *    As of GCC 7 there are quite a few more of these, and the 'in between'
+ *    code is significant.  Especially with KASAN enabled, some of the code
+ *    between the mov and jmpq uses .rodata itself, which can confuse things.
+ *
+ *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+ *    ensure the same register is used in the mov and jump instructions.
+ *
+ *    NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+ */
+static struct rela *find_switch_table(struct objtool_file *file,
+				      struct symbol *func,
+				      struct instruction *insn)
+{
+	struct rela *text_rela, *rodata_rela;
+	struct instruction *orig_insn = insn;
+	unsigned long table_offset;
+
+	/*
+	 * Backward search using the @first_jump_src links: these help avoid
+	 * much of the 'in between' code, which could otherwise confuse us.
+	 */
+	for (;
+	     &insn->list != &file->insn_list &&
+	     insn->sec == func->sec &&
+	     insn->offset >= func->offset;
+
+	     insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+
+		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
+			break;
+
+		/* allow small jumps within the range */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+		    insn->jump_dest &&
+		    (insn->jump_dest->offset <= insn->offset ||
+		     insn->jump_dest->offset > orig_insn->offset))
+			break;
+
+		/* look for a relocation which references .rodata */
+		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
+						    insn->len);
+		if (!text_rela || text_rela->sym != file->rodata->sym)
+			continue;
+
+		table_offset = text_rela->addend;
+		if (text_rela->type == R_X86_64_PC32)
+			table_offset += 4;
+
+		/*
+		 * Make sure the .rodata address isn't associated with a
+		 * symbol.  gcc jump tables are anonymous data.
+		 */
+		if (find_symbol_containing(file->rodata, table_offset))
+			continue;
+
+		rodata_rela = find_rela_by_dest(file->rodata, table_offset);
+		if (rodata_rela) {
+			/*
+			 * Use of RIP-relative switch jumps is quite rare, and
+			 * indicates a rare GCC quirk/bug which can leave dead
+			 * code behind.
+			 */
+			if (text_rela->type == R_X86_64_PC32)
+				file->ignore_unreachables = true;
+
+			return rodata_rela;
+		}
+	}
+
+	return NULL;
+}
+
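+/*
+ * Find the switch jump tables used by a function's dynamic jumps.  A table's
+ * size isn't known up front, so each table is only added once the start of
+ * the next table (or the end of the function) has been found.
+ */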
+static int add_func_switch_tables(struct objtool_file *file,
+				  struct symbol *func)
+{
+	struct instruction *insn, *last = NULL, *prev_jump = NULL;
+	struct rela *rela, *prev_rela = NULL;
+	int ret;
+
+	func_for_each_insn_all(file, func, insn) {
+		if (!last)
+			last = insn;
+
+		/*
+		 * Store back-pointers for unconditional forward jumps such
+		 * that find_switch_table() can back-track using those and
+		 * avoid some potentially confusing code.
+		 */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
+		    insn->offset > last->offset &&
+		    insn->jump_dest->offset > insn->offset &&
+		    !insn->jump_dest->first_jump_src) {
+
+			insn->jump_dest->first_jump_src = insn;
+			last = insn->jump_dest;
+		}
+
+		if (insn->type != INSN_JUMP_DYNAMIC)
+			continue;
+
+		rela = find_switch_table(file, func, insn);
+		if (!rela)
+			continue;
+
+		/*
+		 * We found a switch table, but we don't know yet how big it
+		 * is.  Don't add it until we reach the end of the function or
+		 * the beginning of another switch table in the same function.
+		 */
+		if (prev_jump) {
+			ret = add_switch_table(file, prev_jump, prev_rela, rela);
+			if (ret)
+				return ret;
+		}
+
+		prev_jump = insn;
+		prev_rela = rela;
+	}
+
+	if (prev_jump) {
+		ret = add_switch_table(file, prev_jump, prev_rela, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * For some switch statements, gcc generates a jump table in the .rodata
+ * section which contains a list of addresses within the function to jump to.
+ * This finds these jump tables and adds them to the insn->alts lists.
+ */
+static int add_switch_table_alts(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	int ret;
+
+	if (!file->rodata || !file->rodata->rela)
+		return 0;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			ret = add_func_switch_tables(file, func);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
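+/*
+ * Read the unwind hints from the .discard.unwind_hints section (emitted by
+ * the kernel's UNWIND_HINT* asm macros).  Each struct unwind_hint records
+ * the CFA base register, CFA offset and unwind type at one instruction,
+ * seeding insn->state where normal stack-op tracking isn't possible (e.g.
+ * entry code).  SAVE/RESTORE hints instead mark a point whose state should
+ * be snapshotted and later reapplied.
+ */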
+static int read_unwind_hints(struct objtool_file *file)
+{
+	struct section *sec, *relasec;
+	struct rela *rela;
+	struct unwind_hint *hint;
+	struct instruction *insn;
+	struct cfi_reg *cfa;
+	int i;
+
+	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
+	if (!sec)
+		return 0;
+
+	relasec = sec->rela;
+	if (!relasec) {
+		WARN("missing .rela.discard.unwind_hints section");
+		return -1;
+	}
+
+	if (sec->len % sizeof(struct unwind_hint)) {
+		WARN("struct unwind_hint size mismatch");
+		return -1;
+	}
+
+	file->hints = true;
+
+	for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+		hint = (struct unwind_hint *)sec->data->d_buf + i;
+
+		rela = find_rela_by_dest(sec, i * sizeof(*hint));
+		if (!rela) {
+			WARN("can't find rela for unwind_hints[%d]", i);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("can't find insn for unwind_hints[%d]", i);
+			return -1;
+		}
+
+		cfa = &insn->state.cfa;
+
+		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
+			insn->save = true;
+			continue;
+
+		} else if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
+			insn->restore = true;
+			insn->hint = true;
+			continue;
+		}
+
+		insn->hint = true;
+
+		switch (hint->sp_reg) {
+		case ORC_REG_UNDEFINED:
+			cfa->base = CFI_UNDEFINED;
+			break;
+		case ORC_REG_SP:
+			cfa->base = CFI_SP;
+			break;
+		case ORC_REG_BP:
+			cfa->base = CFI_BP;
+			break;
+		case ORC_REG_SP_INDIRECT:
+			cfa->base = CFI_SP_INDIRECT;
+			break;
+		case ORC_REG_R10:
+			cfa->base = CFI_R10;
+			break;
+		case ORC_REG_R13:
+			cfa->base = CFI_R13;
+			break;
+		case ORC_REG_DI:
+			cfa->base = CFI_DI;
+			break;
+		case ORC_REG_DX:
+			cfa->base = CFI_DX;
+			break;
+		default:
+			WARN_FUNC("unsupported unwind_hint sp base reg %d",
+				  insn->sec, insn->offset, hint->sp_reg);
+			return -1;
+		}
+
+		cfa->offset = hint->sp_offset;
+		insn->state.type = hint->type;
+	}
+
+	return 0;
+}
+
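+/*
+ * Read the .discard.retpoline_safe annotations (e.g. from the kernel's
+ * ANNOTATE_RETPOLINE_SAFE macro).  Each entry whitelists one indirect jump
+ * or call which is deliberately allowed to remain in a retpoline build.
+ */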
+static int read_retpoline_hints(struct objtool_file *file)
+{
+	struct section *sec;
+	struct instruction *insn;
+	struct rela *rela;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.retpoline_safe entry");
+			return -1;
+		}
+
+		if (insn->type != INSN_JUMP_DYNAMIC &&
+		    insn->type != INSN_CALL_DYNAMIC) {
+			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		insn->retpoline_safe = true;
+	}
+
+	return 0;
+}
+
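+/*
+ * Decode the object file into objtool's data structures: instructions,
+ * dead ends, ignore annotations, jump/call destinations, alternatives,
+ * switch tables, unwind hints and retpoline-safe annotations, in that order.
+ */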
+static int decode_sections(struct objtool_file *file)
+{
+	int ret;
+
+	ret = decode_instructions(file);
+	if (ret)
+		return ret;
+
+	ret = add_dead_ends(file);
+	if (ret)
+		return ret;
+
+	add_ignores(file);
+
+	ret = add_nospec_ignores(file);
+	if (ret)
+		return ret;
+
+	ret = add_jump_destinations(file);
+	if (ret)
+		return ret;
+
+	ret = add_special_section_alts(file);
+	if (ret)
+		return ret;
+
+	ret = add_call_destinations(file);
+	if (ret)
+		return ret;
+
+	ret = add_switch_table_alts(file);
+	if (ret)
+		return ret;
+
+	ret = read_unwind_hints(file);
+	if (ret)
+		return ret;
+
+	ret = read_retpoline_hints(file);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
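+/*
+ * __fentry__ calls are inserted by the compiler at the very start of a
+ * function, before the frame pointer is set up, so they are exempted from
+ * the frame pointer checks in validate_branch().
+ */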
+static bool is_fentry_call(struct instruction *insn)
+{
+	if (insn->type == INSN_CALL &&
+	    insn->call_dest->type == STT_NOTYPE &&
+	    !strcmp(insn->call_dest->name, "__fentry__"))
+		return true;
+
+	return false;
+}
+
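+/*
+ * Return true if the tracked state no longer matches the initial call-frame
+ * state, i.e. the stack frame has been modified.  Used to reject returns and
+ * sibling calls made with a modified frame.
+ */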
+static bool has_modified_stack_frame(struct insn_state *state)
+{
+	int i;
+
+	if (state->cfa.base != initial_func_cfi.cfa.base ||
+	    state->cfa.offset != initial_func_cfi.cfa.offset ||
+	    state->stack_size != initial_func_cfi.cfa.offset ||
+	    state->drap)
+		return true;
+
+	for (i = 0; i < CFI_NUM_REGS; i++)
+		if (state->regs[i].base != initial_func_cfi.regs[i].base ||
+		    state->regs[i].offset != initial_func_cfi.regs[i].offset)
+			return true;
+
+	return false;
+}
+
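+/*
+ * Return true if a proper frame pointer has been set up: the CFA is based on
+ * BP and the previous BP is saved at CFA-16, directly below the return
+ * address (or the DRAP equivalent).
+ */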
+static bool has_valid_stack_frame(struct insn_state *state)
+{
+	if (state->cfa.base == CFI_BP && state->regs[CFI_BP].base == CFI_CFA &&
+	    state->regs[CFI_BP].offset == -16)
+		return true;
+
+	if (state->drap && state->regs[CFI_BP].base == CFI_BP)
+		return true;
+
+	return false;
+}
+
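+/*
+ * Simplified CFA tracking for ORC_TYPE_REGS / ORC_TYPE_REGS_IRET regions
+ * (pt_regs-based entry code): while the CFA is SP-based, only its offset is
+ * adjusted for pushes, pops and immediate additions to the stack pointer.
+ */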
+static int update_insn_state_regs(struct instruction *insn, struct insn_state *state)
+{
+	struct cfi_reg *cfa = &state->cfa;
+	struct stack_op *op = &insn->stack_op;
+
+	if (cfa->base != CFI_SP)
+		return 0;
+
+	/* push */
+	if (op->dest.type == OP_DEST_PUSH)
+		cfa->offset += 8;
+
+	/* pop */
+	if (op->src.type == OP_SRC_POP)
+		cfa->offset -= 8;
+
+	/* add immediate to sp */
+	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
+	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
+		cfa->offset -= op->src.offset;
+
+	return 0;
+}
+
+static void save_reg(struct insn_state *state, unsigned char reg, int base,
+		     int offset)
+{
+	if (arch_callee_saved_reg(reg) &&
+	    state->regs[reg].base == CFI_UNDEFINED) {
+		state->regs[reg].base = base;
+		state->regs[reg].offset = offset;
+	}
+}
+
+static void restore_reg(struct insn_state *state, unsigned char reg)
+{
+	state->regs[reg].base = CFI_UNDEFINED;
+	state->regs[reg].offset = 0;
+}
+
+/*
+ * A note about DRAP stack alignment:
+ *
+ * GCC has the concept of a DRAP register, which is used to help keep track of
+ * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
+ * register.  The typical DRAP pattern is:
+ *
+ *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
+ *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
+ *   41 ff 72 f8		pushq  -0x8(%r10)
+ *   55				push   %rbp
+ *   48 89 e5			mov    %rsp,%rbp
+ *				(more pushes)
+ *   41 52			push   %r10
+ *				...
+ *   41 5a			pop    %r10
+ *				(more pops)
+ *   5d				pop    %rbp
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * There are some variations in the epilogues, like:
+ *
+ *   5b				pop    %rbx
+ *   41 5a			pop    %r10
+ *   41 5c			pop    %r12
+ *   41 5d			pop    %r13
+ *   41 5e			pop    %r14
+ *   c9				leaveq
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * and:
+ *
+ *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
+ *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
+ *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
+ *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
+ *   c9				leaveq
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * Sometimes r13 is used as the DRAP register, in which case it's saved and
+ * restored beforehand:
+ *
+ *   41 55			push   %r13
+ *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
+ *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
+ *				...
+ *   49 8d 65 f0		lea    -0x10(%r13),%rsp
+ *   41 5d			pop    %r13
+ *   c3				retq
+ */
+static int update_insn_state(struct instruction *insn, struct insn_state *state)
+{
+	struct stack_op *op = &insn->stack_op;
+	struct cfi_reg *cfa = &state->cfa;
+	struct cfi_reg *regs = state->regs;
+
+	/* stack operations don't make sense with an undefined CFA */
+	if (cfa->base == CFI_UNDEFINED) {
+		if (insn->func) {
+			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
+			return -1;
+		}
+		return 0;
+	}
+
+	if (state->type == ORC_TYPE_REGS || state->type == ORC_TYPE_REGS_IRET)
+		return update_insn_state_regs(insn, state);
+
+	switch (op->dest.type) {
+
+	case OP_DEST_REG:
+		switch (op->src.type) {
+
+		case OP_SRC_REG:
+			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
+			    cfa->base == CFI_SP &&
+			    regs[CFI_BP].base == CFI_CFA &&
+			    regs[CFI_BP].offset == -cfa->offset) {
+
+				/* mov %rsp, %rbp */
+				cfa->base = op->dest.reg;
+				state->bp_scratch = false;
+			}
+
+			else if (op->src.reg == CFI_SP &&
+				 op->dest.reg == CFI_BP && state->drap) {
+
+				/* drap: mov %rsp, %rbp */
+				regs[CFI_BP].base = CFI_BP;
+				regs[CFI_BP].offset = -state->stack_size;
+				state->bp_scratch = false;
+			}
+
+			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+				/*
+				 * mov %rsp, %reg
+				 *
+				 * This is needed for the rare case where GCC
+				 * does:
+				 *
+				 *   mov    %rsp, %rax
+				 *   ...
+				 *   mov    %rax, %rsp
+				 */
+				state->vals[op->dest.reg].base = CFI_CFA;
+				state->vals[op->dest.reg].offset = -state->stack_size;
+			}
+
+			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
+				 cfa->base == CFI_BP) {
+
+				/*
+				 * mov %rbp, %rsp
+				 *
+				 * Restore the original stack pointer (Clang).
+				 */
+				state->stack_size = -state->regs[CFI_BP].offset;
+			}
+
+			else if (op->dest.reg == cfa->base) {
+
+				/* mov %reg, %rsp */
+				if (cfa->base == CFI_SP &&
+				    state->vals[op->src.reg].base == CFI_CFA) {
+
+					/*
+					 * This is needed for the rare case
+					 * where GCC does something dumb like:
+					 *
+					 *   lea    0x8(%rsp), %rcx
+					 *   ...
+					 *   mov    %rcx, %rsp
+					 */
+					cfa->offset = -state->vals[op->src.reg].offset;
+					state->stack_size = cfa->offset;
+
+				} else {
+					cfa->base = CFI_UNDEFINED;
+					cfa->offset = 0;
+				}
+			}
+
+			break;
+
+		case OP_SRC_ADD:
+			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
+
+				/* add imm, %rsp */
+				state->stack_size -= op->src.offset;
+				if (cfa->base == CFI_SP)
+					cfa->offset -= op->src.offset;
+				break;
+			}
+
+			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+
+				/* lea disp(%rbp), %rsp */
+				state->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+				break;
+			}
+
+			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+				/* drap: lea disp(%rsp), %drap */
+				state->drap_reg = op->dest.reg;
+
+				/*
+				 * lea disp(%rsp), %reg
+				 *
+				 * This is needed for the rare case where GCC
+				 * does something dumb like:
+				 *
+				 *   lea    0x8(%rsp), %rcx
+				 *   ...
+				 *   mov    %rcx, %rsp
+				 */
+				state->vals[op->dest.reg].base = CFI_CFA;
+				state->vals[op->dest.reg].offset =
+					-state->stack_size + op->src.offset;
+
+				break;
+			}
+
+			if (state->drap && op->dest.reg == CFI_SP &&
+			    op->src.reg == state->drap_reg) {
+
+				 /* drap: lea disp(%drap), %rsp */
+				cfa->base = CFI_SP;
+				cfa->offset = state->stack_size = -op->src.offset;
+				state->drap_reg = CFI_UNDEFINED;
+				state->drap = false;
+				break;
+			}
+
+			if (op->dest.reg == state->cfa.base) {
+				WARN_FUNC("unsupported stack register modification",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			break;
+
+		case OP_SRC_AND:
+			if (op->dest.reg != CFI_SP ||
+			    (state->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
+			    (state->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
+				WARN_FUNC("unsupported stack pointer realignment",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			if (state->drap_reg != CFI_UNDEFINED) {
+				/* drap: and imm, %rsp */
+				cfa->base = state->drap_reg;
+				cfa->offset = state->stack_size = 0;
+				state->drap = true;
+			}
+
+			/*
+			 * Older versions of GCC (4.8ish) realign the stack
+			 * without DRAP, with a frame pointer.
+			 */
+
+			break;
+
+		case OP_SRC_POP:
+			if (!state->drap && op->dest.type == OP_DEST_REG &&
+			    op->dest.reg == cfa->base) {
+
+				/* pop %rbp */
+				cfa->base = CFI_SP;
+			}
+
+			if (state->drap && cfa->base == CFI_BP_INDIRECT &&
+			    op->dest.type == OP_DEST_REG &&
+			    op->dest.reg == state->drap_reg &&
+			    state->drap_offset == -state->stack_size) {
+
+				/* drap: pop %drap */
+				cfa->base = state->drap_reg;
+				cfa->offset = 0;
+				state->drap_offset = -1;
+
+			} else if (regs[op->dest.reg].offset == -state->stack_size) {
+
+				/* pop %reg */
+				restore_reg(state, op->dest.reg);
+			}
+
+			state->stack_size -= 8;
+			if (cfa->base == CFI_SP)
+				cfa->offset -= 8;
+
+			break;
+
+		case OP_SRC_REG_INDIRECT:
+			if (state->drap && op->src.reg == CFI_BP &&
+			    op->src.offset == state->drap_offset) {
+
+				/* drap: mov disp(%rbp), %drap */
+				cfa->base = state->drap_reg;
+				cfa->offset = 0;
+				state->drap_offset = -1;
+			}
+
+			if (state->drap && op->src.reg == CFI_BP &&
+			    op->src.offset == regs[op->dest.reg].offset) {
+
+				/* drap: mov disp(%rbp), %reg */
+				restore_reg(state, op->dest.reg);
+
+			} else if (op->src.reg == cfa->base &&
+			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
+
+				/* mov disp(%rbp), %reg */
+				/* mov disp(%rsp), %reg */
+				restore_reg(state, op->dest.reg);
+			}
+
+			break;
+
+		default:
+			WARN_FUNC("unknown stack-related instruction",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		break;
+
+	case OP_DEST_PUSH:
+		state->stack_size += 8;
+		if (cfa->base == CFI_SP)
+			cfa->offset += 8;
+
+		if (op->src.type != OP_SRC_REG)
+			break;
+
+		if (state->drap) {
+			if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+				/* drap: push %drap */
+				cfa->base = CFI_BP_INDIRECT;
+				cfa->offset = -state->stack_size;
+
+				/* save drap so we know when to restore it */
+				state->drap_offset = -state->stack_size;
+
+			} else if (op->src.reg == CFI_BP && cfa->base == state->drap_reg) {
+
+				/* drap: push %rbp */
+				state->stack_size = 0;
+
+			} else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+				/* drap: push %reg */
+				save_reg(state, op->src.reg, CFI_BP, -state->stack_size);
+			}
+
+		} else {
+
+			/* push %reg */
+			save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
+		}
+
+		/* detect when asm code uses rbp as a scratch register */
+		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
+		    cfa->base != CFI_BP)
+			state->bp_scratch = true;
+		break;
+
+	case OP_DEST_REG_INDIRECT:
+
+		if (state->drap) {
+			if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+				/* drap: mov %drap, disp(%rbp) */
+				cfa->base = CFI_BP_INDIRECT;
+				cfa->offset = op->dest.offset;
+
+				/* save drap offset so we know when to restore it */
+				state->drap_offset = op->dest.offset;
+			}
+
+			else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+				/* drap: mov reg, disp(%rbp) */
+				save_reg(state, op->src.reg, CFI_BP, op->dest.offset);
+			}
+
+		} else if (op->dest.reg == cfa->base) {
+
+			/* mov reg, disp(%rbp) */
+			/* mov reg, disp(%rsp) */
+			save_reg(state, op->src.reg, CFI_CFA,
+				 op->dest.offset - state->cfa.offset);
+		}
+
+		break;
+
+	case OP_DEST_LEAVE:
+		if ((!state->drap && cfa->base != CFI_BP) ||
+		    (state->drap && cfa->base != state->drap_reg)) {
+			WARN_FUNC("leave instruction with modified stack frame",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		/* leave (mov %rbp, %rsp; pop %rbp) */
+
+		state->stack_size = -state->regs[CFI_BP].offset - 8;
+		restore_reg(state, CFI_BP);
+
+		if (!state->drap) {
+			cfa->base = CFI_SP;
+			cfa->offset -= 8;
+		}
+
+		break;
+
+	case OP_DEST_MEM:
+		if (op->src.type != OP_SRC_POP) {
+			WARN_FUNC("unknown stack-related memory operation",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		/* pop mem */
+		state->stack_size -= 8;
+		if (cfa->base == CFI_SP)
+			cfa->offset -= 8;
+
+		break;
+
+	default:
+		WARN_FUNC("unknown stack-related instruction",
+			  insn->sec, insn->offset);
+		return -1;
+	}
+
+	return 0;
+}
+
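+/*
+ * When an already-visited instruction is reached again via a different
+ * branch, the two computed stack states must be identical; otherwise the
+ * unwind information for that address would be ambiguous.
+ */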
+static bool insn_state_match(struct instruction *insn, struct insn_state *state)
+{
+	struct insn_state *state1 = &insn->state, *state2 = state;
+	int i;
+
+	if (memcmp(&state1->cfa, &state2->cfa, sizeof(state1->cfa))) {
+		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
+			  insn->sec, insn->offset,
+			  state1->cfa.base, state1->cfa.offset,
+			  state2->cfa.base, state2->cfa.offset);
+
+	} else if (memcmp(&state1->regs, &state2->regs, sizeof(state1->regs))) {
+		for (i = 0; i < CFI_NUM_REGS; i++) {
+			if (!memcmp(&state1->regs[i], &state2->regs[i],
+				    sizeof(struct cfi_reg)))
+				continue;
+
+			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
+				  insn->sec, insn->offset,
+				  i, state1->regs[i].base, state1->regs[i].offset,
+				  i, state2->regs[i].base, state2->regs[i].offset);
+			break;
+		}
+
+	} else if (state1->type != state2->type) {
+		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
+			  insn->sec, insn->offset, state1->type, state2->type);
+
+	} else if (state1->drap != state2->drap ||
+		 (state1->drap && state1->drap_reg != state2->drap_reg) ||
+		 (state1->drap && state1->drap_offset != state2->drap_offset)) {
+		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
+			  insn->sec, insn->offset,
+			  state1->drap, state1->drap_reg, state1->drap_offset,
+			  state2->drap, state2->drap_reg, state2->drap_offset);
+
+	} else
+		return true;
+
+	return false;
+}
+
+/*
+ * Follow the branch starting at the given instruction, and recursively follow
+ * any other branches (jumps).  Meanwhile, track the frame pointer state at
+ * each instruction and validate all the rules described in
+ * tools/objtool/Documentation/stack-validation.txt.
+ */
+static int validate_branch(struct objtool_file *file, struct instruction *first,
+			   struct insn_state state)
+{
+	struct alternative *alt;
+	struct instruction *insn, *next_insn;
+	struct section *sec;
+	struct symbol *func = NULL;
+	int ret;
+
+	insn = first;
+	sec = insn->sec;
+
+	if (insn->alt_group && list_empty(&insn->alts)) {
+		WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
+			  sec, insn->offset);
+		return 1;
+	}
+
+	while (1) {
+		next_insn = next_insn_same_sec(file, insn);
+
+		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
+			WARN("%s() falls through to next function %s()",
+			     func->name, insn->func->name);
+			return 1;
+		}
+
+		func = insn->func ? insn->func->pfunc : NULL;
+
+		if (func && insn->ignore) {
+			WARN_FUNC("BUG: why am I validating an ignored function?",
+				  sec, insn->offset);
+			return 1;
+		}
+
+		if (insn->visited) {
+			if (!insn->hint && !insn_state_match(insn, &state))
+				return 1;
+
+			return 0;
+		}
+
+		if (insn->hint) {
+			if (insn->restore) {
+				struct instruction *save_insn, *i;
+
+				i = insn;
+				save_insn = NULL;
+				func_for_each_insn_continue_reverse(file, insn->func, i) {
+					if (i->save) {
+						save_insn = i;
+						break;
+					}
+				}
+
+				if (!save_insn) {
+					WARN_FUNC("no corresponding CFI save for CFI restore",
+						  sec, insn->offset);
+					return 1;
+				}
+
+				if (!save_insn->visited) {
+					/*
+					 * Oops, no state to copy yet.
+					 * Hopefully we can reach this
+					 * instruction from another branch
+					 * after the save insn has been
+					 * visited.
+					 */
+					if (insn == first)
+						return 0;
+
+					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
+						  sec, insn->offset);
+					return 1;
+				}
+
+				insn->state = save_insn->state;
+			}
+
+			state = insn->state;
+
+		} else
+			insn->state = state;
+
+		insn->visited = true;
+
+		if (!insn->ignore_alts) {
+			list_for_each_entry(alt, &insn->alts, list) {
+				ret = validate_branch(file, alt->insn, state);
+				if (ret)
+					return 1;
+			}
+		}
+
+		switch (insn->type) {
+
+		case INSN_RETURN:
+			if (func && has_modified_stack_frame(&state)) {
+				WARN_FUNC("return with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			if (state.bp_scratch) {
+				WARN("%s uses BP as a scratch register",
+				     insn->func->name);
+				return 1;
+			}
+
+			return 0;
+
+		case INSN_CALL:
+			if (is_fentry_call(insn))
+				break;
+
+			ret = dead_end_function(file, insn->call_dest);
+			if (ret == 1)
+				return 0;
+			if (ret == -1)
+				return 1;
+
+			/* fallthrough */
+		case INSN_CALL_DYNAMIC:
+			if (!no_fp && func && !has_valid_stack_frame(&state)) {
+				WARN_FUNC("call without frame pointer save/setup",
+					  sec, insn->offset);
+				return 1;
+			}
+			break;
+
+		case INSN_JUMP_CONDITIONAL:
+		case INSN_JUMP_UNCONDITIONAL:
+			if (insn->jump_dest &&
+			    (!func || !insn->jump_dest->func ||
+			     insn->jump_dest->func->pfunc == func)) {
+				ret = validate_branch(file, insn->jump_dest,
+						      state);
+				if (ret)
+					return 1;
+
+			} else if (func && has_modified_stack_frame(&state)) {
+				WARN_FUNC("sibling call from callable instruction with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			if (insn->type == INSN_JUMP_UNCONDITIONAL)
+				return 0;
+
+			break;
+
+		case INSN_JUMP_DYNAMIC:
+			if (func && list_empty(&insn->alts) &&
+			    has_modified_stack_frame(&state)) {
+				WARN_FUNC("sibling call from callable instruction with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			return 0;
+
+		case INSN_CONTEXT_SWITCH:
+			if (func && (!next_insn || !next_insn->hint)) {
+				WARN_FUNC("unsupported instruction in callable function",
+					  sec, insn->offset);
+				return 1;
+			}
+			return 0;
+
+		case INSN_STACK:
+			if (update_insn_state(insn, &state))
+				return 1;
+
+			break;
+
+		default:
+			break;
+		}
+
+		if (insn->dead_end)
+			return 0;
+
+		if (!next_insn) {
+			if (state.cfa.base == CFI_UNDEFINED)
+				return 0;
+			WARN("%s: unexpected end of section", sec->name);
+			return 1;
+		}
+
+		insn = next_insn;
+	}
+
+	return 0;
+}
+
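+/*
+ * Run an additional validation pass starting at every hinted instruction
+ * which wasn't already reached from a normal C function entry point
+ * (typically entry and interrupt asm code).
+ */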
+static int validate_unwind_hints(struct objtool_file *file)
+{
+	struct instruction *insn;
+	int ret, warnings = 0;
+	struct insn_state state;
+
+	if (!file->hints)
+		return 0;
+
+	clear_insn_state(&state);
+
+	for_each_insn(file, insn) {
+		if (insn->hint && !insn->visited) {
+			ret = validate_branch(file, insn, state);
+			warnings += ret;
+		}
+	}
+
+	return warnings;
+}
+
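+/*
+ * In a retpoline build, warn about any remaining indirect jump or call which
+ * hasn't been converted to a retpoline thunk and isn't annotated as safe.
+ */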
+static int validate_retpoline(struct objtool_file *file)
+{
+	struct instruction *insn;
+	int warnings = 0;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_JUMP_DYNAMIC &&
+		    insn->type != INSN_CALL_DYNAMIC)
+			continue;
+
+		if (insn->retpoline_safe)
+			continue;
+
+		/*
+		 * .init.text code is run before userspace and thus doesn't
+		 * strictly need retpolines, except for modules, which are
+		 * loaded late and therefore very much do need retpolines in
+		 * their .init.text.
+		 */
+		if (!strcmp(insn->sec->name, ".init.text") && !module)
+			continue;
+
+		WARN_FUNC("indirect %s found in RETPOLINE build",
+			  insn->sec, insn->offset,
+			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+
+		warnings++;
+	}
+
+	return warnings;
+}
+
+static bool is_kasan_insn(struct instruction *insn)
+{
+	return (insn->type == INSN_CALL &&
+		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
+}
+
+static bool is_ubsan_insn(struct instruction *insn)
+{
+	return (insn->type == INSN_CALL &&
+		!strcmp(insn->call_dest->name,
+			"__ubsan_handle_builtin_unreachable"));
+}
+
+static bool ignore_unreachable_insn(struct instruction *insn)
+{
+	int i;
+
+	if (insn->ignore || insn->type == INSN_NOP)
+		return true;
+
+	/*
+	 * Ignore any unused exceptions.  This can happen when a whitelisted
+	 * function has an exception table entry.
+	 *
+	 * Also ignore alternative replacement instructions.  This can happen
+	 * when a whitelisted function uses one of the ALTERNATIVE macros.
+	 */
+	if (!strcmp(insn->sec->name, ".fixup") ||
+	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
+	    !strcmp(insn->sec->name, ".altinstr_aux"))
+		return true;
+
+	/*
+	 * Check if this (or a subsequent) instruction is related to
+	 * CONFIG_UBSAN or CONFIG_KASAN.
+	 *
+	 * End the search at 5 instructions to avoid going into the weeds.
+	 */
+	if (!insn->func)
+		return false;
+	for (i = 0; i < 5; i++) {
+
+		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+			return true;
+
+		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+			if (insn->jump_dest &&
+			    insn->jump_dest->func == insn->func) {
+				insn = insn->jump_dest;
+				continue;
+			}
+
+			break;
+		}
+
+		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
+			break;
+
+		insn = list_next_entry(insn, list);
+	}
+
+	return false;
+}
+
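+/*
+ * Validate every top-level (parent) function from its entry point, starting
+ * with the architecture's initial call-frame state (on x86-64: CFA = %rsp +
+ * 8, i.e. just the return address on the stack).
+ */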
+static int validate_functions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	struct instruction *insn;
+	struct insn_state state;
+	int ret, warnings = 0;
+
+	clear_insn_state(&state);
+
+	state.cfa = initial_func_cfi.cfa;
+	memcpy(&state.regs, &initial_func_cfi.regs,
+	       CFI_NUM_REGS * sizeof(struct cfi_reg));
+	state.stack_size = initial_func_cfi.cfa.offset;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC || func->pfunc != func)
+				continue;
+
+			insn = find_insn(file, sec, func->offset);
+			if (!insn || insn->ignore)
+				continue;
+
+			ret = validate_branch(file, insn, state);
+			warnings += ret;
+		}
+	}
+
+	return warnings;
+}
+
+static int validate_reachable_instructions(struct objtool_file *file)
+{
+	struct instruction *insn;
+
+	if (file->ignore_unreachables)
+		return 0;
+
+	for_each_insn(file, insn) {
+		if (insn->visited || ignore_unreachable_insn(insn))
+			continue;
+
+		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void cleanup(struct objtool_file *file)
+{
+	struct instruction *insn, *tmpinsn;
+	struct alternative *alt, *tmpalt;
+
+	list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
+		list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
+			list_del(&alt->list);
+			free(alt);
+		}
+		list_del(&insn->list);
+		hash_del(&insn->hash);
+		free(insn);
+	}
+	elf_close(file->elf);
+}
+
+int check(const char *_objname, bool orc)
+{
+	struct objtool_file file;
+	int ret, warnings = 0;
+
+	objname = _objname;
+
+	file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
+	if (!file.elf)
+		return 1;
+
+	INIT_LIST_HEAD(&file.insn_list);
+	hash_init(file.insn_hash);
+	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
+	file.rodata = find_section_by_name(file.elf, ".rodata");
+	file.c_file = find_section_by_name(file.elf, ".comment");
+	file.ignore_unreachables = no_unreachable;
+	file.hints = false;
+
+	arch_initial_func_cfi_state(&initial_func_cfi);
+
+	ret = decode_sections(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	if (list_empty(&file.insn_list))
+		goto out;
+
+	if (retpoline) {
+		ret = validate_retpoline(&file);
+		if (ret < 0)
+			return ret;
+		warnings += ret;
+	}
+
+	ret = validate_functions(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	ret = validate_unwind_hints(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	if (!warnings) {
+		ret = validate_reachable_instructions(&file);
+		if (ret < 0)
+			goto out;
+		warnings += ret;
+	}
+
+	if (orc) {
+		ret = create_orc(&file);
+		if (ret < 0)
+			goto out;
+
+		ret = create_orc_sections(&file);
+		if (ret < 0)
+			goto out;
+
+		ret = elf_write(file.elf);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	cleanup(&file);
+
+	/* ignore warnings for now until we get all the code cleaned up */
+	if (ret || warnings)
+		return 0;
+	return 0;
+}
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
new file mode 100644
index 0000000..c6b68fc
--- /dev/null
+++ b/tools/objtool/check.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CHECK_H
+#define _CHECK_H
+
+#include <stdbool.h>
+#include "elf.h"
+#include "cfi.h"
+#include "arch.h"
+#include "orc.h"
+#include <linux/hashtable.h>
+
+struct insn_state {
+	struct cfi_reg cfa;
+	struct cfi_reg regs[CFI_NUM_REGS];
+	int stack_size;
+	unsigned char type;
+	bool bp_scratch;
+	bool drap;
+	int drap_reg, drap_offset;
+	struct cfi_reg vals[CFI_NUM_REGS];
+};
+
+struct instruction {
+	struct list_head list;
+	struct hlist_node hash;
+	struct section *sec;
+	unsigned long offset;
+	unsigned int len;
+	unsigned char type;
+	unsigned long immediate;
+	bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+	bool retpoline_safe;
+	struct symbol *call_dest;
+	struct instruction *jump_dest;
+	struct instruction *first_jump_src;
+	struct list_head alts;
+	struct symbol *func;
+	struct stack_op stack_op;
+	struct insn_state state;
+	struct orc_entry orc;
+};
+
+struct objtool_file {
+	struct elf *elf;
+	struct list_head insn_list;
+	DECLARE_HASHTABLE(insn_hash, 16);
+	struct section *rodata, *whitelist;
+	bool ignore_unreachables, c_file, hints;
+};
+
+int check(const char *objname, bool orc);
+
+struct instruction *find_insn(struct objtool_file *file,
+			      struct section *sec, unsigned long offset);
+
+#define for_each_insn(file, insn)					\
+	list_for_each_entry(insn, &file->insn_list, list)
+
+#define sec_for_each_insn(file, sec, insn)				\
+	for (insn = find_insn(file, sec, 0);				\
+	     insn && &insn->list != &file->insn_list &&			\
+			insn->sec == sec;				\
+	     insn = list_next_entry(insn, list))
+
+
+#endif /* _CHECK_H */
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index faacf0c..4e60e10 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,13 +31,6 @@
 #include "elf.h"
 #include "warn.h"
 
-/*
- * Fallback for systems without this "read, mmaping if possible" cmd.
- */
-#ifndef ELF_C_READ_MMAP
-#define ELF_C_READ_MMAP ELF_C_READ
-#endif
-
 struct section *find_section_by_name(struct elf *elf, const char *name)
 {
 	struct section *sec;
@@ -86,6 +79,19 @@
 	return NULL;
 }
 
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
+{
+	struct section *sec;
+	struct symbol *sym;
+
+	list_for_each_entry(sec, &elf->sections, list)
+		list_for_each_entry(sym, &sec->symbol_list, list)
+			if (!strcmp(sym->name, name))
+				return sym;
+
+	return NULL;
+}
+
 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
 {
 	struct symbol *sym;
@@ -140,12 +146,12 @@
 	int i;
 
 	if (elf_getshdrnum(elf->elf, &sections_nr)) {
-		perror("elf_getshdrnum");
+		WARN_ELF("elf_getshdrnum");
 		return -1;
 	}
 
 	if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
-		perror("elf_getshdrstrndx");
+		WARN_ELF("elf_getshdrstrndx");
 		return -1;
 	}
 
@@ -166,37 +172,37 @@
 
 		s = elf_getscn(elf->elf, i);
 		if (!s) {
-			perror("elf_getscn");
+			WARN_ELF("elf_getscn");
 			return -1;
 		}
 
 		sec->idx = elf_ndxscn(s);
 
 		if (!gelf_getshdr(s, &sec->sh)) {
-			perror("gelf_getshdr");
+			WARN_ELF("gelf_getshdr");
 			return -1;
 		}
 
 		sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
 		if (!sec->name) {
-			perror("elf_strptr");
+			WARN_ELF("elf_strptr");
 			return -1;
 		}
 
-		sec->elf_data = elf_getdata(s, NULL);
-		if (!sec->elf_data) {
-			perror("elf_getdata");
-			return -1;
+		if (sec->sh.sh_size != 0) {
+			sec->data = elf_getdata(s, NULL);
+			if (!sec->data) {
+				WARN_ELF("elf_getdata");
+				return -1;
+			}
+			if (sec->data->d_off != 0 ||
+			    sec->data->d_size != sec->sh.sh_size) {
+				WARN("unexpected data attributes for %s",
+				     sec->name);
+				return -1;
+			}
 		}
-
-		if (sec->elf_data->d_off != 0 ||
-		    sec->elf_data->d_size != sec->sh.sh_size) {
-			WARN("unexpected data attributes for %s", sec->name);
-			return -1;
-		}
-
-		sec->data = (unsigned long)sec->elf_data->d_buf;
-		sec->len = sec->elf_data->d_size;
+		sec->len = sec->sh.sh_size;
 	}
 
 	/* sanity check, one more call to elf_nextscn() should return NULL */
@@ -210,10 +216,11 @@
 
 static int read_symbols(struct elf *elf)
 {
-	struct section *symtab;
-	struct symbol *sym;
+	struct section *symtab, *sec;
+	struct symbol *sym, *pfunc;
 	struct list_head *entry, *tmp;
 	int symbols_nr, i;
+	char *coldstr;
 
 	symtab = find_section_by_name(elf, ".symtab");
 	if (!symtab) {
@@ -233,15 +240,15 @@
 
 		sym->idx = i;
 
-		if (!gelf_getsym(symtab->elf_data, i, &sym->sym)) {
-			perror("gelf_getsym");
+		if (!gelf_getsym(symtab->data, i, &sym->sym)) {
+			WARN_ELF("gelf_getsym");
 			goto err;
 		}
 
 		sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
 				       sym->sym.st_name);
 		if (!sym->name) {
-			perror("elf_strptr");
+			WARN_ELF("elf_strptr");
 			goto err;
 		}
 
@@ -288,6 +295,30 @@
 		hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
 	}
 
+	/* Create parent/child links for any cold subfunctions */
+	list_for_each_entry(sec, &elf->sections, list) {
+		list_for_each_entry(sym, &sec->symbol_list, list) {
+			if (sym->type != STT_FUNC)
+				continue;
+			sym->pfunc = sym->cfunc = sym;
+			coldstr = strstr(sym->name, ".cold.");
+			if (coldstr) {
+				coldstr[0] = '\0';
+				pfunc = find_symbol_by_name(elf, sym->name);
+				coldstr[0] = '.';
+
+				if (!pfunc) {
+					WARN("%s(): can't find parent function",
+					     sym->name);
+					goto err;
+				}
+
+				sym->pfunc = pfunc;
+				pfunc->cfunc = sym;
+			}
+		}
+	}
+
 	return 0;
 
 err:
@@ -323,8 +354,8 @@
 			}
 			memset(rela, 0, sizeof(*rela));
 
-			if (!gelf_getrela(sec->elf_data, i, &rela->rela)) {
-				perror("gelf_getrela");
+			if (!gelf_getrela(sec->data, i, &rela->rela)) {
+				WARN_ELF("gelf_getrela");
 				return -1;
 			}
 
@@ -348,9 +379,10 @@
 	return 0;
 }
 
-struct elf *elf_open(const char *name)
+struct elf *elf_open(const char *name, int flags)
 {
 	struct elf *elf;
+	Elf_Cmd cmd;
 
 	elf_version(EV_CURRENT);
 
@@ -363,27 +395,28 @@
 
 	INIT_LIST_HEAD(&elf->sections);
 
-	elf->name = strdup(name);
-	if (!elf->name) {
-		perror("strdup");
-		goto err;
-	}
-
-	elf->fd = open(name, O_RDONLY);
+	elf->fd = open(name, flags);
 	if (elf->fd == -1) {
 		fprintf(stderr, "objtool: Can't open '%s': %s\n",
 			name, strerror(errno));
 		goto err;
 	}
 
-	elf->elf = elf_begin(elf->fd, ELF_C_READ_MMAP, NULL);
+	if ((flags & O_ACCMODE) == O_RDONLY)
+		cmd = ELF_C_READ_MMAP;
+	else if ((flags & O_ACCMODE) == O_RDWR)
+		cmd = ELF_C_RDWR;
+	else /* O_WRONLY */
+		cmd = ELF_C_WRITE;
+
+	elf->elf = elf_begin(elf->fd, cmd, NULL);
 	if (!elf->elf) {
-		perror("elf_begin");
+		WARN_ELF("elf_begin");
 		goto err;
 	}
 
 	if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
-		perror("gelf_getehdr");
+		WARN_ELF("gelf_getehdr");
 		goto err;
 	}
 
@@ -403,12 +436,212 @@
 	return NULL;
 }
 
+struct section *elf_create_section(struct elf *elf, const char *name,
+				   size_t entsize, int nr)
+{
+	struct section *sec, *shstrtab;
+	size_t size = entsize * nr;
+	struct Elf_Scn *s;
+	Elf_Data *data;
+
+	sec = malloc(sizeof(*sec));
+	if (!sec) {
+		perror("malloc");
+		return NULL;
+	}
+	memset(sec, 0, sizeof(*sec));
+
+	INIT_LIST_HEAD(&sec->symbol_list);
+	INIT_LIST_HEAD(&sec->rela_list);
+	hash_init(sec->rela_hash);
+	hash_init(sec->symbol_hash);
+
+	list_add_tail(&sec->list, &elf->sections);
+
+	s = elf_newscn(elf->elf);
+	if (!s) {
+		WARN_ELF("elf_newscn");
+		return NULL;
+	}
+
+	sec->name = strdup(name);
+	if (!sec->name) {
+		perror("strdup");
+		return NULL;
+	}
+
+	sec->idx = elf_ndxscn(s);
+	sec->len = size;
+	sec->changed = true;
+
+	sec->data = elf_newdata(s);
+	if (!sec->data) {
+		WARN_ELF("elf_newdata");
+		return NULL;
+	}
+
+	sec->data->d_size = size;
+	sec->data->d_align = 1;
+
+	if (size) {
+		sec->data->d_buf = malloc(size);
+		if (!sec->data->d_buf) {
+			perror("malloc");
+			return NULL;
+		}
+		memset(sec->data->d_buf, 0, size);
+	}
+
+	if (!gelf_getshdr(s, &sec->sh)) {
+		WARN_ELF("gelf_getshdr");
+		return NULL;
+	}
+
+	sec->sh.sh_size = size;
+	sec->sh.sh_entsize = entsize;
+	sec->sh.sh_type = SHT_PROGBITS;
+	sec->sh.sh_addralign = 1;
+	sec->sh.sh_flags = SHF_ALLOC;
+
+
+	/* Add section name to .shstrtab */
+	shstrtab = find_section_by_name(elf, ".shstrtab");
+	if (!shstrtab) {
+		WARN("can't find .shstrtab section");
+		return NULL;
+	}
+
+	s = elf_getscn(elf->elf, shstrtab->idx);
+	if (!s) {
+		WARN_ELF("elf_getscn");
+		return NULL;
+	}
+
+	data = elf_newdata(s);
+	if (!data) {
+		WARN_ELF("elf_newdata");
+		return NULL;
+	}
+
+	data->d_buf = sec->name;
+	data->d_size = strlen(name) + 1;
+	data->d_align = 1;
+
+	sec->sh.sh_name = shstrtab->len;
+
+	shstrtab->len += strlen(name) + 1;
+	shstrtab->changed = true;
+
+	return sec;
+}
+
+struct section *elf_create_rela_section(struct elf *elf, struct section *base)
+{
+	char *relaname;
+	struct section *sec;
+
+	relaname = malloc(strlen(base->name) + strlen(".rela") + 1);
+	if (!relaname) {
+		perror("malloc");
+		return NULL;
+	}
+	strcpy(relaname, ".rela");
+	strcat(relaname, base->name);
+
+	sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0);
+	free(relaname);
+	if (!sec)
+		return NULL;
+
+	base->rela = sec;
+	sec->base = base;
+
+	sec->sh.sh_type = SHT_RELA;
+	sec->sh.sh_addralign = 8;
+	sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
+	sec->sh.sh_info = base->idx;
+	sec->sh.sh_flags = SHF_INFO_LINK;
+
+	return sec;
+}
+
+int elf_rebuild_rela_section(struct section *sec)
+{
+	struct rela *rela;
+	int nr, idx = 0, size;
+	GElf_Rela *relas;
+
+	nr = 0;
+	list_for_each_entry(rela, &sec->rela_list, list)
+		nr++;
+
+	size = nr * sizeof(*relas);
+	relas = malloc(size);
+	if (!relas) {
+		perror("malloc");
+		return -1;
+	}
+
+	sec->data->d_buf = relas;
+	sec->data->d_size = size;
+
+	sec->sh.sh_size = size;
+
+	idx = 0;
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		relas[idx].r_offset = rela->offset;
+		relas[idx].r_addend = rela->addend;
+		relas[idx].r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+		idx++;
+	}
+
+	return 0;
+}
+
+int elf_write(struct elf *elf)
+{
+	struct section *sec;
+	Elf_Scn *s;
+
+	/* Update section headers for changed sections: */
+	list_for_each_entry(sec, &elf->sections, list) {
+		if (sec->changed) {
+			s = elf_getscn(elf->elf, sec->idx);
+			if (!s) {
+				WARN_ELF("elf_getscn");
+				return -1;
+			}
+			if (!gelf_update_shdr(s, &sec->sh)) {
+				WARN_ELF("gelf_update_shdr");
+				return -1;
+			}
+		}
+	}
+
+	/* Make sure the new section header entries get updated properly. */
+	elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);
+
+	/* Write all changes to the file. */
+	if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
+		WARN_ELF("elf_update");
+		return -1;
+	}
+
+	return 0;
+}
+
 void elf_close(struct elf *elf)
 {
 	struct section *sec, *tmpsec;
 	struct symbol *sym, *tmpsym;
 	struct rela *rela, *tmprela;
 
+	if (elf->elf)
+		elf_end(elf->elf);
+
+	if (elf->fd > 0)
+		close(elf->fd);
+
 	list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
 		list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
 			list_del(&sym->list);
@@ -423,11 +656,6 @@
 		list_del(&sec->list);
 		free(sec);
 	}
-	if (elf->name)
-		free(elf->name);
-	if (elf->fd > 0)
-		close(elf->fd);
-	if (elf->elf)
-		elf_end(elf->elf);
+
 	free(elf);
 }
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 731973e..de5cd2d 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -28,6 +28,13 @@
 # define elf_getshdrstrndx elf_getshstrndx
 #endif
 
+/*
+ * Fallback for systems without this "read, mmaping if possible" cmd.
+ */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
 struct section {
 	struct list_head list;
 	GElf_Shdr sh;
@@ -37,11 +44,11 @@
 	DECLARE_HASHTABLE(rela_hash, 16);
 	struct section *base, *rela;
 	struct symbol *sym;
-	Elf_Data *elf_data;
+	Elf_Data *data;
 	char *name;
 	int idx;
-	unsigned long data;
 	unsigned int len;
+	bool changed, text;
 };
 
 struct symbol {
@@ -54,6 +61,7 @@
 	unsigned char bind, type;
 	unsigned long offset;
 	unsigned int len;
+	struct symbol *pfunc, *cfunc;
 };
 
 struct rela {
@@ -76,16 +84,23 @@
 };
 
 
-struct elf *elf_open(const char *name);
+struct elf *elf_open(const char *name, int flags);
 struct section *find_section_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
 				     unsigned int len);
 struct symbol *find_containing_func(struct section *sec, unsigned long offset);
+struct section *elf_create_section(struct elf *elf, const char *name, size_t
+				   entsize, int nr);
+struct section *elf_create_rela_section(struct elf *elf, struct section *base);
+int elf_rebuild_rela_section(struct section *sec);
+int elf_write(struct elf *elf);
 void elf_close(struct elf *elf);
 
-
+#define for_each_sec(file, sec)						\
+	list_for_each_entry(sec, &file->elf->sections, list)
 
 #endif /* _OBJTOOL_ELF_H */
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 46c326d..07f3299 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -31,11 +31,10 @@
 #include <stdlib.h>
 #include <subcmd/exec-cmd.h>
 #include <subcmd/pager.h>
+#include <linux/kernel.h>
 
 #include "builtin.h"
 
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
 struct cmd_struct {
 	const char *name;
 	int (*fn)(int, const char **);
@@ -43,10 +42,11 @@
 };
 
 static const char objtool_usage_string[] =
-	"objtool [OPTIONS] COMMAND [ARGS]";
+	"objtool COMMAND [ARGS]";
 
 static struct cmd_struct objtool_cmds[] = {
 	{"check",	cmd_check,	"Perform stack metadata validation on an object file" },
+	{"orc",		cmd_orc,	"Generate in-place ORC unwind tables for an object file" },
 };
 
 bool help;
@@ -70,7 +70,7 @@
 
 	printf("\n");
 
-	exit(1);
+	exit(129);
 }
 
 static void handle_options(int *argc, const char ***argv)
@@ -86,9 +86,7 @@
 			break;
 		} else {
 			fprintf(stderr, "Unknown option: %s\n", cmd);
-			fprintf(stderr, "\n Usage: %s\n",
-				objtool_usage_string);
-			exit(1);
+			cmd_usage();
 		}
 
 		(*argv)++;
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
new file mode 100644
index 0000000..b0e92a6
--- /dev/null
+++ b/tools/objtool/orc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_H
+#define _ORC_H
+
+#include <asm/orc_types.h>
+
+struct objtool_file;
+
+int create_orc(struct objtool_file *file);
+int create_orc_sections(struct objtool_file *file);
+
+int orc_dump(const char *objname);
+
+#endif /* _ORC_H */
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
new file mode 100644
index 0000000..c334382
--- /dev/null
+++ b/tools/objtool/orc_dump.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <unistd.h>
+#include "orc.h"
+#include "warn.h"
+
+static const char *reg_name(unsigned int reg)
+{
+	switch (reg) {
+	case ORC_REG_PREV_SP:
+		return "prevsp";
+	case ORC_REG_DX:
+		return "dx";
+	case ORC_REG_DI:
+		return "di";
+	case ORC_REG_BP:
+		return "bp";
+	case ORC_REG_SP:
+		return "sp";
+	case ORC_REG_R10:
+		return "r10";
+	case ORC_REG_R13:
+		return "r13";
+	case ORC_REG_BP_INDIRECT:
+		return "bp(ind)";
+	case ORC_REG_SP_INDIRECT:
+		return "sp(ind)";
+	default:
+		return "?";
+	}
+}
+
+static const char *orc_type_name(unsigned int type)
+{
+	switch (type) {
+	case ORC_TYPE_CALL:
+		return "call";
+	case ORC_TYPE_REGS:
+		return "regs";
+	case ORC_TYPE_REGS_IRET:
+		return "iret";
+	default:
+		return "?";
+	}
+}
+
+static void print_reg(unsigned int reg, int offset)
+{
+	if (reg == ORC_REG_BP_INDIRECT)
+		printf("(bp%+d)", offset);
+	else if (reg == ORC_REG_SP_INDIRECT)
+		printf("(sp%+d)", offset);
+	else if (reg == ORC_REG_UNDEFINED)
+		printf("(und)");
+	else
+		printf("%s%+d", reg_name(reg), offset);
+}
+
+int orc_dump(const char *_objname)
+{
+	int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
+	struct orc_entry *orc = NULL;
+	char *name;
+	size_t nr_sections;
+	Elf64_Addr orc_ip_addr = 0;
+	size_t shstrtab_idx;
+	Elf *elf;
+	Elf_Scn *scn;
+	GElf_Shdr sh;
+	GElf_Rela rela;
+	GElf_Sym sym;
+	Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
+
+
+	objname = _objname;
+
+	elf_version(EV_CURRENT);
+
+	fd = open(objname, O_RDONLY);
+	if (fd == -1) {
+		perror("open");
+		return -1;
+	}
+
+	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+	if (!elf) {
+		WARN_ELF("elf_begin");
+		return -1;
+	}
+
+	if (elf_getshdrnum(elf, &nr_sections)) {
+		WARN_ELF("elf_getshdrnum");
+		return -1;
+	}
+
+	if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
+		WARN_ELF("elf_getshdrstrndx");
+		return -1;
+	}
+
+	for (i = 0; i < nr_sections; i++) {
+		scn = elf_getscn(elf, i);
+		if (!scn) {
+			WARN_ELF("elf_getscn");
+			return -1;
+		}
+
+		if (!gelf_getshdr(scn, &sh)) {
+			WARN_ELF("gelf_getshdr");
+			return -1;
+		}
+
+		name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+		if (!name) {
+			WARN_ELF("elf_strptr");
+			return -1;
+		}
+
+		data = elf_getdata(scn, NULL);
+		if (!data) {
+			WARN_ELF("elf_getdata");
+			return -1;
+		}
+
+		if (!strcmp(name, ".symtab")) {
+			symtab = data;
+		} else if (!strcmp(name, ".orc_unwind")) {
+			orc = data->d_buf;
+			orc_size = sh.sh_size;
+		} else if (!strcmp(name, ".orc_unwind_ip")) {
+			orc_ip = data->d_buf;
+			orc_ip_addr = sh.sh_addr;
+		} else if (!strcmp(name, ".rela.orc_unwind_ip")) {
+			rela_orc_ip = data;
+		}
+	}
+
+	if (!symtab || !orc || !orc_ip)
+		return 0;
+
+	if (orc_size % sizeof(*orc) != 0) {
+		WARN("bad .orc_unwind section size");
+		return -1;
+	}
+
+	nr_entries = orc_size / sizeof(*orc);
+	for (i = 0; i < nr_entries; i++) {
+		if (rela_orc_ip) {
+			if (!gelf_getrela(rela_orc_ip, i, &rela)) {
+				WARN_ELF("gelf_getrela");
+				return -1;
+			}
+
+			if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
+				WARN_ELF("gelf_getsym");
+				return -1;
+			}
+
+			scn = elf_getscn(elf, sym.st_shndx);
+			if (!scn) {
+				WARN_ELF("elf_getscn");
+				return -1;
+			}
+
+			if (!gelf_getshdr(scn, &sh)) {
+				WARN_ELF("gelf_getshdr");
+				return -1;
+			}
+
+			name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+			if (!name || !*name) {
+				WARN_ELF("elf_strptr");
+				return -1;
+			}
+
+			printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
+
+		} else {
+			printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
+		}
+
+
+		printf(" sp:");
+
+		print_reg(orc[i].sp_reg, orc[i].sp_offset);
+
+		printf(" bp:");
+
+		print_reg(orc[i].bp_reg, orc[i].bp_offset);
+
+		printf(" type:%s\n", orc_type_name(orc[i].type));
+	}
+
+	elf_end(elf);
+	close(fd);
+
+	return 0;
+}
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
new file mode 100644
index 0000000..18384d9
--- /dev/null
+++ b/tools/objtool/orc_gen.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "orc.h"
+#include "check.h"
+#include "warn.h"
+
+int create_orc(struct objtool_file *file)
+{
+	struct instruction *insn;
+
+	for_each_insn(file, insn) {
+		struct orc_entry *orc = &insn->orc;
+		struct cfi_reg *cfa = &insn->state.cfa;
+		struct cfi_reg *bp = &insn->state.regs[CFI_BP];
+
+		if (cfa->base == CFI_UNDEFINED) {
+			orc->sp_reg = ORC_REG_UNDEFINED;
+			continue;
+		}
+
+		switch (cfa->base) {
+		case CFI_SP:
+			orc->sp_reg = ORC_REG_SP;
+			break;
+		case CFI_SP_INDIRECT:
+			orc->sp_reg = ORC_REG_SP_INDIRECT;
+			break;
+		case CFI_BP:
+			orc->sp_reg = ORC_REG_BP;
+			break;
+		case CFI_BP_INDIRECT:
+			orc->sp_reg = ORC_REG_BP_INDIRECT;
+			break;
+		case CFI_R10:
+			orc->sp_reg = ORC_REG_R10;
+			break;
+		case CFI_R13:
+			orc->sp_reg = ORC_REG_R13;
+			break;
+		case CFI_DI:
+			orc->sp_reg = ORC_REG_DI;
+			break;
+		case CFI_DX:
+			orc->sp_reg = ORC_REG_DX;
+			break;
+		default:
+			WARN_FUNC("unknown CFA base reg %d",
+				  insn->sec, insn->offset, cfa->base);
+			return -1;
+		}
+
+		switch(bp->base) {
+		case CFI_UNDEFINED:
+			orc->bp_reg = ORC_REG_UNDEFINED;
+			break;
+		case CFI_CFA:
+			orc->bp_reg = ORC_REG_PREV_SP;
+			break;
+		case CFI_BP:
+			orc->bp_reg = ORC_REG_BP;
+			break;
+		default:
+			WARN_FUNC("unknown BP base reg %d",
+				  insn->sec, insn->offset, bp->base);
+			return -1;
+		}
+
+		orc->sp_offset = cfa->offset;
+		orc->bp_offset = bp->offset;
+		orc->type = insn->state.type;
+	}
+
+	return 0;
+}
+
+static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
+				unsigned int idx, struct section *insn_sec,
+				unsigned long insn_off, struct orc_entry *o)
+{
+	struct orc_entry *orc;
+	struct rela *rela;
+
+	if (!insn_sec->sym) {
+		WARN("missing symbol for section %s", insn_sec->name);
+		return -1;
+	}
+
+	/* populate ORC data */
+	orc = (struct orc_entry *)u_sec->data->d_buf + idx;
+	memcpy(orc, o, sizeof(*orc));
+
+	/* populate rela for ip */
+	rela = malloc(sizeof(*rela));
+	if (!rela) {
+		perror("malloc");
+		return -1;
+	}
+	memset(rela, 0, sizeof(*rela));
+
+	rela->sym = insn_sec->sym;
+	rela->addend = insn_off;
+	rela->type = R_X86_64_PC32;
+	rela->offset = idx * sizeof(int);
+
+	list_add_tail(&rela->list, &ip_relasec->rela_list);
+	hash_add(ip_relasec->rela_hash, &rela->hash, rela->offset);
+
+	return 0;
+}
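
create_orc_entry() stores the ORC data and queues an R_X86_64_PC32 rela, so at link time each 32-bit word in .orc_unwind_ip receives S + A - P and therefore holds the covered instruction address relative to the word itself, which is exactly what orc_dump.c undoes with orc_ip_addr + (i * sizeof(int)) + orc_ip[i]. A tiny standalone sketch of that arithmetic with made-up addresses (not objtool or linker code):

/* PC32 resolution sketch: the linker writes S + A - P into the word at P;
 * adding the stored value back to P recovers the instruction address.
 * All addresses below are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t S = 0xffffffff81000000ULL;  /* section symbol (.text)   */
	uint64_t A = 0x40;                   /* addend: insn offset      */
	uint64_t P = 0xffffffff82000010ULL;  /* place: the orc_ip[] word */

	int32_t stored = (int32_t)(S + A - P);   /* what the linker writes */
	uint64_t ip = P + (int64_t)stored;       /* what a reader computes */

	printf("resolved ip = %#llx (S + A = %#llx)\n",
	       (unsigned long long)ip, (unsigned long long)(S + A));
	return 0;
}
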
+
+int create_orc_sections(struct objtool_file *file)
+{
+	struct instruction *insn, *prev_insn;
+	struct section *sec, *u_sec, *ip_relasec;
+	unsigned int idx;
+
+	struct orc_entry empty = {
+		.sp_reg = ORC_REG_UNDEFINED,
+		.bp_reg  = ORC_REG_UNDEFINED,
+		.type    = ORC_TYPE_CALL,
+	};
+
+	sec = find_section_by_name(file->elf, ".orc_unwind");
+	if (sec) {
+		WARN("file already has .orc_unwind section, skipping");
+		return -1;
+	}
+
+	/* count the number of needed orcs */
+	idx = 0;
+	for_each_sec(file, sec) {
+		if (!sec->text)
+			continue;
+
+		prev_insn = NULL;
+		sec_for_each_insn(file, sec, insn) {
+			if (!prev_insn ||
+			    memcmp(&insn->orc, &prev_insn->orc,
+				   sizeof(struct orc_entry))) {
+				idx++;
+			}
+			prev_insn = insn;
+		}
+
+		/* section terminator */
+		if (prev_insn)
+			idx++;
+	}
+	if (!idx)
+		return -1;
+
+
+	/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
+	sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
+	if (!sec)
+		return -1;
+
+	ip_relasec = elf_create_rela_section(file->elf, sec);
+	if (!ip_relasec)
+		return -1;
+
+	/* create .orc_unwind section */
+	u_sec = elf_create_section(file->elf, ".orc_unwind",
+				   sizeof(struct orc_entry), idx);
+
+	/* populate sections */
+	idx = 0;
+	for_each_sec(file, sec) {
+		if (!sec->text)
+			continue;
+
+		prev_insn = NULL;
+		sec_for_each_insn(file, sec, insn) {
+			if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc,
+						 sizeof(struct orc_entry))) {
+
+				if (create_orc_entry(u_sec, ip_relasec, idx,
+						     insn->sec, insn->offset,
+						     &insn->orc))
+					return -1;
+
+				idx++;
+			}
+			prev_insn = insn;
+		}
+
+		/* section terminator */
+		if (prev_insn) {
+			if (create_orc_entry(u_sec, ip_relasec, idx,
+					     prev_insn->sec,
+					     prev_insn->offset + prev_insn->len,
+					     &empty))
+				return -1;
+
+			idx++;
+		}
+	}
+
+	if (elf_rebuild_rela_section(ip_relasec))
+		return -1;
+
+	return 0;
+}
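
create_orc_sections() makes two passes with the same rule: emit an entry only where an instruction's ORC state differs from the previous instruction's, plus one terminator per text section, so .orc_unwind stays compact. A standalone sketch of that run-length deduplication over made-up states (not objtool structures):

/* Standalone illustration (not objtool code) of the dedup rule used above:
 * one output entry per run of identical states, plus a terminator. */
#include <stdio.h>
#include <string.h>

struct state { int sp_reg, sp_offset; };

int main(void)
{
	struct state per_insn[] = {
		{ 1, 8 }, { 1, 8 }, { 1, 16 }, { 1, 16 }, { 1, 16 }, { 1, 8 },
	};
	const int n = sizeof(per_insn) / sizeof(per_insn[0]);
	struct state *prev = NULL;
	int emitted = 0;

	for (int i = 0; i < n; i++) {
		if (!prev || memcmp(&per_insn[i], prev, sizeof(struct state)))
			emitted++;                 /* state changed: new entry */
		prev = &per_insn[i];
	}
	emitted++;                                  /* section terminator */

	printf("%d instructions -> %d ORC-style entries\n", n, emitted);
	return 0;
}
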
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index bff8abb..84f001d 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -91,16 +91,16 @@
 	alt->jump_or_nop = entry->jump_or_nop;
 
 	if (alt->group) {
-		alt->orig_len = *(unsigned char *)(sec->data + offset +
+		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
 						   entry->orig_len);
-		alt->new_len = *(unsigned char *)(sec->data + offset +
+		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
 						  entry->new_len);
 	}
 
 	if (entry->feature) {
 		unsigned short feature;
 
-		feature = *(unsigned short *)(sec->data + offset +
+		feature = *(unsigned short *)(sec->data->d_buf + offset +
 					      entry->feature);
 
 		/*
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755
index 0000000..1470e74
--- /dev/null
+++ b/tools/objtool/sync-check.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+	local file=$1
+
+	diff $file ../../$file > /dev/null ||
+		echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+	exit 0
+fi
+
+for i in $FILES; do
+  check $i
+done
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index ac7e075..afd9f7a 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -18,6 +18,13 @@
 #ifndef _WARN_H
 #define _WARN_H
 
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "elf.h"
+
 extern const char *objname;
 
 static inline char *offstr(struct section *sec, unsigned long offset)
@@ -57,4 +64,7 @@
 	free(_str);					\
 })
 
+#define WARN_ELF(format, ...)				\
+	WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+
 #endif /* _WARN_H */
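
WARN_ELF() appends libelf's elf_errmsg(-1) to a normal WARN() format and leans on the GNU ##__VA_ARGS__ comma-swallowing extension so the same macro works with or without extra arguments. A minimal standalone sketch of that macro pattern, with err_string() standing in for elf_errmsg() (assumes GCC/Clang):

/* Minimal sketch of the WARN_ELF pattern: append a library error string to a
 * printf-style warning.  Uses the GNU ##__VA_ARGS__ extension, like warn.h;
 * err_string() stands in for libelf's elf_errmsg(-1). */
#include <stdio.h>

static const char *err_string(void) { return "invalid section header"; }

#define WARN_X(format, ...) \
	fprintf(stderr, "warning: " format "\n", ##__VA_ARGS__)

#define WARN_LIB(format, ...) \
	WARN_X(format ": %s", ##__VA_ARGS__, err_string())

int main(void)
{
	WARN_LIB("gelf_getshdr");                 /* no extra arguments */
	WARN_LIB("section %d unreadable", 3);     /* with an argument   */
	return 0;
}
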
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 0bda2cc..a4f98e1 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -51,6 +51,7 @@
 tools/include/asm-generic/bitops/atomic.h
 tools/include/asm-generic/bitops/const_hweight.h
 tools/include/asm-generic/bitops/__ffs.h
+tools/include/asm-generic/bitops/__ffz.h
 tools/include/asm-generic/bitops/__fls.h
 tools/include/asm-generic/bitops/find.h
 tools/include/asm-generic/bitops/fls64.h
@@ -60,7 +61,9 @@
 tools/include/linux/atomic.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
+tools/include/linux/compiler-gcc.h
 tools/include/linux/coresight-pmu.h
+tools/include/linux/bug.h
 tools/include/linux/filter.h
 tools/include/linux/hash.h
 tools/include/linux/kernel.h
@@ -70,12 +73,15 @@
 tools/include/uapi/asm-generic/mman.h
 tools/include/uapi/linux/bpf.h
 tools/include/uapi/linux/bpf_common.h
+tools/include/uapi/linux/fcntl.h
 tools/include/uapi/linux/hw_breakpoint.h
 tools/include/uapi/linux/mman.h
 tools/include/uapi/linux/perf_event.h
+tools/include/uapi/linux/stat.h
 tools/include/linux/poison.h
 tools/include/linux/rbtree.h
 tools/include/linux/rbtree_augmented.h
+tools/include/linux/refcount.h
 tools/include/linux/string.h
 tools/include/linux/stringify.h
 tools/include/linux/types.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 2b92ffe..ad3726c 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -177,6 +177,36 @@
 endif
 endif
 
+# The fixdep build - we force the fixdep tool to be built as
+# the first target in a separate make session so that it is not
+# disturbed by any parallel make jobs. Once fixdep is done,
+# we issue the requested build with the FIXDEP=1 variable.
+#
+# The fixdep build is disabled for $(NON_CONFIG_TARGETS)
+# targets, because it's not necessary.
+
+ifdef FIXDEP
+  force_fixdep := 0
+else
+  force_fixdep := $(config)
+endif
+
+export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
+export HOSTCC HOSTLD HOSTAR
+
+include $(srctree)/tools/build/Makefile.include
+
+ifeq ($(force_fixdep),1)
+goals := $(filter-out all sub-make, $(MAKECMDGOALS))
+
+$(goals) all: sub-make
+
+sub-make: fixdep
+	@./check-headers.sh
+	$(Q)$(MAKE) FIXDEP=1 -f Makefile.perf $(goals)
+
+else # force_fixdep
+
 # Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
 # Without this setting the output feature dump file misses some features, for
 # example, liberty. Select all checkers so we won't get an incomplete feature
@@ -348,10 +378,6 @@
 
 PERF_IN := $(OUTPUT)perf-in.o
 
-export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR
-include $(srctree)/tools/build/Makefile.include
-
 JEVENTS       := $(OUTPUT)pmu-events/jevents
 JEVENTS_IN    := $(OUTPUT)pmu-events/jevents-in.o
 
@@ -362,99 +388,6 @@
 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
 $(PERF_IN): prepare FORCE
-	@(test -f ../../include/uapi/linux/perf_event.h && ( \
-        (diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
-        || echo "Warning: tools/include/uapi/linux/perf_event.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/linux/hash.h && ( \
-        (diff -B ../include/linux/hash.h ../../include/linux/hash.h >/dev/null) \
-        || echo "Warning: tools/include/linux/hash.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/linux/hw_breakpoint.h && ( \
-        (diff -B ../include/uapi/linux/hw_breakpoint.h ../../include/uapi/linux/hw_breakpoint.h >/dev/null) \
-        || echo "Warning: tools/include/uapi/linux/hw_breakpoint.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/asm/disabled-features.h && ( \
-        (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/asm/required-features.h && ( \
-        (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \
-        (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/lib/memcpy_64.S && ( \
-        (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
-        || echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/lib/memset_64.S && ( \
-        (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
-        || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/arm/include/uapi/asm/perf_regs.h ../../arch/arm/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/arm/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm64/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/arm64/include/uapi/asm/perf_regs.h ../../arch/arm64/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/arm64/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/powerpc/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/powerpc/include/uapi/asm/perf_regs.h ../../arch/powerpc/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/powerpc/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/perf_regs.h ../../arch/x86/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/kvm.h ../../arch/x86/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/kvm_perf.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/kvm_perf.h ../../arch/x86/include/uapi/asm/kvm_perf.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/svm.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/svm.h ../../arch/x86/include/uapi/asm/svm.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/svm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/x86/include/uapi/asm/vmx.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/vmx.h ../../arch/x86/include/uapi/asm/vmx.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/vmx.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/powerpc/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/powerpc/include/uapi/asm/kvm.h ../../arch/powerpc/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/powerpc/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/s390/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/kvm.h ../../arch/s390/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/s390/include/uapi/asm/kvm_perf.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/kvm_perf.h ../../arch/s390/include/uapi/asm/kvm_perf.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/s390/include/uapi/asm/sie.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/sie.h ../../arch/s390/include/uapi/asm/sie.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/sie.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/arm/include/uapi/asm/kvm.h ../../arch/arm/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/arm/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../arch/arm64/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/arm64/include/uapi/asm/kvm.h ../../arch/arm64/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/arm64/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/arch_hweight.h && ( \
-        (diff -B ../include/asm-generic/bitops/arch_hweight.h ../../include/asm-generic/bitops/arch_hweight.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/arch_hweight.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/const_hweight.h && ( \
-        (diff -B ../include/asm-generic/bitops/const_hweight.h ../../include/asm-generic/bitops/const_hweight.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/const_hweight.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/__fls.h && ( \
-        (diff -B ../include/asm-generic/bitops/__fls.h ../../include/asm-generic/bitops/__fls.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/__fls.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/fls.h && ( \
-        (diff -B ../include/asm-generic/bitops/fls.h ../../include/asm-generic/bitops/fls.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/fls.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/asm-generic/bitops/fls64.h && ( \
-        (diff -B ../include/asm-generic/bitops/fls64.h ../../include/asm-generic/bitops/fls64.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/fls64.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/linux/coresight-pmu.h && ( \
-	(diff -B ../include/linux/coresight-pmu.h ../../include/linux/coresight-pmu.h >/dev/null) \
-	|| echo "Warning: tools/include/linux/coresight-pmu.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/asm-generic/mman-common.h && ( \
-	(diff -B ../include/uapi/asm-generic/mman-common.h ../../include/uapi/asm-generic/mman-common.h >/dev/null) \
-	|| echo "Warning: tools/include/uapi/asm-generic/mman-common.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/asm-generic/mman.h && ( \
-	(diff -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>$$" ../include/uapi/asm-generic/mman.h ../../include/uapi/asm-generic/mman.h >/dev/null) \
-	|| echo "Warning: tools/include/uapi/asm-generic/mman.h differs from kernel" >&2 )) || true
-	@(test -f ../../include/uapi/linux/mman.h && ( \
-	(diff -B -I "^#include <\(uapi/\)*asm/mman.h>$$" ../include/uapi/linux/mman.h ../../include/uapi/linux/mman.h >/dev/null) \
-	|| echo "Warning: tools/include/uapi/linux/mman.h differs from kernel" >&2 )) || true
 	$(Q)$(MAKE) $(build)=perf
 
 $(JEVENTS_IN): FORCE
@@ -470,7 +403,7 @@
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
 		$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
 
-$(GTK_IN): fixdep FORCE
+$(GTK_IN): FORCE
 	$(Q)$(MAKE) $(build)=gtk
 
 $(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -515,7 +448,7 @@
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
 build-dir   = $(if $(__build-dir),$(__build-dir),.)
 
-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep archheaders
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders
 
 $(OUTPUT)%.o: %.c prepare FORCE
 	$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -555,7 +488,7 @@
 
 LIBPERF_IN := $(OUTPUT)libperf-in.o
 
-$(LIBPERF_IN): prepare fixdep FORCE
+$(LIBPERF_IN): prepare FORCE
 	$(Q)$(MAKE) $(build)=libperf
 
 $(LIB_FILE): $(LIBPERF_IN)
@@ -563,10 +496,10 @@
 
 LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
 
-$(LIBTRACEEVENT): fixdep FORCE
+$(LIBTRACEEVENT): FORCE
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
 
-libtraceevent_plugins: fixdep FORCE
+libtraceevent_plugins: FORCE
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
 
 $(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
@@ -579,21 +512,21 @@
 install-traceevent-plugins: libtraceevent_plugins
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
 
-$(LIBAPI): fixdep FORCE
+$(LIBAPI): FORCE
 	$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
 
 $(LIBAPI)-clean:
 	$(call QUIET_CLEAN, libapi)
 	$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
 
-$(LIBBPF): fixdep FORCE
+$(LIBBPF): FORCE
 	$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
 $(LIBBPF)-clean:
 	$(call QUIET_CLEAN, libbpf)
 	$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
 
-$(LIBSUBCMD): fixdep FORCE
+$(LIBSUBCMD): FORCE
 	$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
 
 $(LIBSUBCMD)-clean:
@@ -790,3 +723,4 @@
 .PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
 .PHONY: libtraceevent_plugins archheaders
 
+endif # force_fixdep
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 555263e..e93ef0b 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -335,6 +335,9 @@
 326	common	copy_file_range		sys_copy_file_range
 327	64	preadv2			sys_preadv2
 328	64	pwritev2		sys_pwritev2
+329	common	pkey_mprotect		sys_pkey_mprotect
+330	common	pkey_alloc		sys_pkey_alloc
+331	common	pkey_free		sys_pkey_free
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
@@ -374,5 +377,5 @@
 543	x32	io_setup		compat_sys_io_setup
 544	x32	io_submit		compat_sys_io_submit
 545	x32	execveat		compat_sys_execveat/ptregs
-534	x32	preadv2			compat_sys_preadv2
-535	x32	pwritev2		compat_sys_pwritev2
+546	x32	preadv2			compat_sys_preadv64v2
+547	x32	pwritev2		compat_sys_pwritev64v2
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index a74a48d..2eb1154 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -69,7 +69,7 @@
 {
 	char *buf = malloc(128);
 
-	if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+	if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
 		free(buf);
 		return NULL;
 	}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 68861e8..43d5f35 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2042,11 +2042,16 @@
 		return 0;
 
 	if (transaction_run) {
+		struct parse_events_error errinfo;
+
 		if (pmu_have_event("cpu", "cycles-ct") &&
 		    pmu_have_event("cpu", "el-start"))
-			err = parse_events(evsel_list, transaction_attrs, NULL);
+			err = parse_events(evsel_list, transaction_attrs,
+					   &errinfo);
 		else
-			err = parse_events(evsel_list, transaction_limited_attrs, NULL);
+			err = parse_events(evsel_list,
+					   transaction_limited_attrs,
+					   &errinfo);
 		if (err) {
 			fprintf(stderr, "Cannot set up transaction events\n");
 			return -1;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c61e012..e68c866 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1061,8 +1061,10 @@
 
 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
 {
-	if (!strcmp(var, "top.call-graph"))
-		var = "call-graph.record-mode"; /* fall-through */
+	if (!strcmp(var, "top.call-graph")) {
+		var = "call-graph.record-mode";
+		return perf_default_config(var, value, cb);
+	}
 	if (!strcmp(var, "top.children")) {
 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
 		return 0;
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
new file mode 100755
index 0000000..83fe220
--- /dev/null
+++ b/tools/perf/check-headers.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+HEADERS='
+include/uapi/linux/fcntl.h
+include/uapi/linux/perf_event.h
+include/uapi/linux/stat.h
+include/linux/hash.h
+include/uapi/linux/hw_breakpoint.h
+arch/x86/include/asm/disabled-features.h
+arch/x86/include/asm/required-features.h
+arch/x86/include/asm/cpufeatures.h
+arch/arm/include/uapi/asm/perf_regs.h
+arch/arm64/include/uapi/asm/perf_regs.h
+arch/powerpc/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/kvm.h
+arch/x86/include/uapi/asm/kvm_perf.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/powerpc/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/sie.h
+arch/arm/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/kvm.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
+include/asm-generic/bitops/__fls.h
+include/asm-generic/bitops/fls.h
+include/asm-generic/bitops/fls64.h
+include/linux/coresight-pmu.h
+include/uapi/asm-generic/mman-common.h
+'
+
+check () {
+  file=$1
+  opts=
+
+  shift
+  while [ -n "$*" ]; do
+    opts="$opts \"$1\""
+    shift
+  done
+
+  cmd="diff $opts ../$file ../../$file > /dev/null"
+
+  test -f ../../$file &&
+  eval $cmd || echo "Warning: $file differs from kernel" >&2
+}
+
+
+# simple diff check
+for i in $HEADERS; do
+  check $i -B
+done
+
+# diff with extra ignore lines
+check arch/x86/lib/memcpy_64.S        -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check arch/x86/lib/memset_64.S        -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check include/uapi/asm-generic/mman.h -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>"
+check include/uapi/linux/mman.h       -B -I "^#include <\(uapi/\)*asm/mman.h>"
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index a508233..2aabf0a 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -123,7 +123,7 @@
 
 		if (pair && UM(pair->start) == mem_start) {
 next_pair:
-			if (strcmp(sym->name, pair->name) == 0) {
+			if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
 				/*
 				 * kallsyms don't have the symbol end, so we
 				 * set that by using the next symbol start - 1,
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 4bc5882..d2c6cdd 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -366,23 +366,7 @@
 	if (!is_regular_file(name))
 		return -EINVAL;
 
-	if (dso__needs_decompress(dso)) {
-		char newpath[KMOD_DECOMP_LEN];
-		size_t len = sizeof(newpath);
-
-		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
-			free(name);
-			return -dso->load_errno;
-		}
-
-		strcpy(name, newpath);
-	}
-
 	fd = do_open(name);
-
-	if (dso__needs_decompress(dso))
-		unlink(name);
-
 	free(name);
 	return fd;
 }
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bce80f8..f55d108 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -681,14 +681,14 @@
 	struct perf_evsel_config_term *term;
 	struct list_head *config_terms = &evsel->config_terms;
 	struct perf_event_attr *attr = &evsel->attr;
-	struct callchain_param param;
+	/* callgraph default */
+	struct callchain_param param = {
+		.record_mode = callchain_param.record_mode,
+	};
 	u32 dump_size = 0;
 	int max_stack = 0;
 	const char *callgraph_buf = NULL;
 
-	/* callgraph default */
-	param.record_mode = callchain_param.record_mode;
-
 	list_for_each_entry(term, config_terms, list) {
 		switch (term->type) {
 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
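
The evsel.c hunk replaces an uninitialized local callchain_param, whose record_mode was then set by hand, with a designated initializer; in C a partial initializer zero-fills every member that is not named, which is presumably the point of the change. A standalone illustration of the difference (not perf code):

/* Standalone illustration of why the designated initializer matters: the
 * partially initialized struct has all other members zeroed, while the
 * uninitialized local leaves them indeterminate.  Not perf code. */
#include <stdio.h>

struct params {
	int record_mode;
	int dump_size;      /* must be 0 unless explicitly configured */
	int enabled;
};

int main(void)
{
	struct params a = { .record_mode = 2 };   /* dump_size, enabled == 0 */

	struct params b;                           /* indeterminate members  */
	b.record_mode = 2;                         /* dump_size, enabled are
	                                              still uninitialized    */

	printf("a = {%d, %d, %d}\n", a.record_mode, a.dump_size, a.enabled);
	/* reading b.dump_size here would be undefined behaviour */
	(void)b;
	return 0;
}
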
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 10849a0..ad613ea 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -865,7 +865,7 @@
 	 * cumulated only one time to prevent entries more than 100%
 	 * overhead.
 	 */
-	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
+	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
 	if (he_cache == NULL)
 		return -ENOMEM;
 
@@ -1030,8 +1030,6 @@
 	if (err)
 		return err;
 
-	iter->max_stack = max_stack_depth;
-
 	err = iter->ops->prepare_entry(iter, al);
 	if (err)
 		goto out;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index a440a04..159d616 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -102,7 +102,6 @@
 	int curr;
 
 	bool hide_unresolved;
-	int max_stack;
 
 	struct perf_evsel *evsel;
 	struct perf_sample *sample;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 43899e0..e72d370 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -23,8 +23,6 @@
 #endif
 #endif
 
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
 #ifdef __GNUC__
 #define TYPEOF(x) (__typeof__(x))
 #else
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index a899ef81..76faf5b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -94,6 +94,7 @@
 	for TARGET in $(TARGETS); do \
 		echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
 		echo "echo ========================================" >> $(ALL_SCRIPT); \
+		echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
 		echo "cd $$TARGET" >> $(ALL_SCRIPT); \
 		make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
 		echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 17e16fc..99d7f13 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -29,9 +29,11 @@
 		echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
 	fi
 	if [ "$OLD_FWPATH" = "" ]; then
-		OLD_FWPATH=" "
+		# A zero-length write won't work; write a null byte
+		printf '\000' >/sys/module/firmware_class/parameters/path
+	else
+		echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
 	fi
-	echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
 	rm -f "$FW"
 	rmdir "$FWPATH"
 }
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
new file mode 100644
index 0000000..5ba7303
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -0,0 +1,46 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe event string type argument
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+echo 0 > events/enable
+echo > kprobe_events
+
+case `uname -m` in
+x86_64)
+  ARG2=%si
+  OFFS=8
+;;
+i[3456]86)
+  ARG2=%cx
+  OFFS=4
+;;
+aarch64)
+  ARG2=%x1
+  OFFS=8
+;;
+arm*)
+  ARG2=%r1
+  OFFS=4
+;;
+*)
+  echo "Please implement other architecture here"
+  exit_untested
+esac
+
+: "Test get argument (1)"
+echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
+echo 1 > events/kprobes/testprobe/enable
+! echo test >> kprobe_events
+tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
+
+echo 0 > events/kprobes/testprobe/enable
+: "Test get argument (2)"
+echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
+echo 1 > events/kprobes/testprobe/enable
+! echo test1 test2 >> kprobe_events
+tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
+
+echo 0 > events/enable
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
new file mode 100644
index 0000000..231bcd2
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -0,0 +1,97 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe event argument syntax
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
+
+echo 0 > events/enable
+echo > kprobe_events
+
+PROBEFUNC="vfs_read"
+GOODREG=
+BADREG=
+GOODSYM="_sdata"
+if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
+  GOODSYM=$PROBEFUNC
+fi
+BADSYM="deaqswdefr"
+SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
+GOODTYPE="x16"
+BADTYPE="y16"
+
+case `uname -m` in
+x86_64|i[3456]86)
+  GOODREG=%ax
+  BADREG=%ex
+;;
+aarch64)
+  GOODREG=%x0
+  BADREG=%ax
+;;
+arm*)
+  GOODREG=%r0
+  BADREG=%ax
+;;
+esac
+
+test_goodarg() # Good-args
+{
+  while [ "$1" ]; do
+    echo "p ${PROBEFUNC} $1" > kprobe_events
+    shift 1
+  done;
+}
+
+test_badarg() # Bad-args
+{
+  while [ "$1" ]; do
+    ! echo "p ${PROBEFUNC} $1" > kprobe_events
+    shift 1
+  done;
+}
+
+echo > kprobe_events
+
+: "Register access"
+test_goodarg ${GOODREG}
+test_badarg ${BADREG}
+
+: "Symbol access"
+test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
+test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
+	    "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
+
+: "Stack access"
+test_goodarg "\$stack" "\$stack0" "\$stack1"
+test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
+
+: "Retval access"
+echo "r ${PROBEFUNC} \$retval" > kprobe_events
+! echo "p ${PROBEFUNC} \$retval" > kprobe_events
+
+: "Comm access"
+test_goodarg "\$comm"
+
+: "Indirect memory access"
+test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
+	"+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
+test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
+	"+10(\$comm)" "+0(${GOODREG})+10"
+
+: "Name assignment"
+test_goodarg "varname=${GOODREG}"
+test_badarg "varname=varname2=${GOODREG}"
+
+: "Type syntax"
+test_goodarg "${GOODREG}:${GOODTYPE}"
+test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
+	"${GOODTYPE}:${GOODREG}"
+
+: "Combination check"
+
+test_goodarg "\$comm:string" "+0(\$stack):string"
+test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
+
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
new file mode 100644
index 0000000..4fda01a
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe events - probe points
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+TARGET_FUNC=create_trace_kprobe
+
+dec_addr() { # hexaddr
+  printf "%d" "0x"`echo $1 | tail -c 8`
+}
+
+set_offs() { # prev target next
+  A1=`dec_addr $1`
+  A2=`dec_addr $2`
+  A3=`dec_addr $3`
+  TARGET="0x$2" # an address
+  PREV=`expr $A1 - $A2` # offset to previous symbol
+  NEXT=+`expr $A3 - $A2` # offset to next symbol
+  OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
+}
+
+# We have to decode symbol addresses to get correct offsets.
+# If the offset is not an instruction boundary, it causes -EILSEQ.
+set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
+
+UINT_TEST=no
+# printf "%x" -1 returns (unsigned long)-1.
+if [ `printf "%x" -1 | wc -c` != 9 ]; then
+  UINT_TEST=yes
+fi
+
+echo 0 > events/enable
+echo > kprobe_events
+echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
+echo "p:testprobe ${TARGET}" > kprobe_events
+echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
+! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
+if [ "${UINT_TEST}" = yes ]; then
+! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
+fi
+echo > kprobe_events
+clear_trace
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
new file mode 100644
index 0000000..835c7f4
--- /dev/null
+++ b/tools/testing/selftests/memfd/config
@@ -0,0 +1 @@
+CONFIG_FUSE_FS=m
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 4124593..9b654a0 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -97,6 +97,8 @@
 
 static void sock_fanout_set_ebpf(int fd)
 {
+	static char log_buf[65536];
+
 	const int len_off = __builtin_offsetof(struct __sk_buff, len);
 	struct bpf_insn prog[] = {
 		{ BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
@@ -109,7 +111,6 @@
 		{ BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
 		{ BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
 	};
-	char log_buf[512];
 	union bpf_attr attr;
 	int pfd;
 
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 4a82174..cad14cd 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -21,6 +21,7 @@
 #include <sys/epoll.h>
 #include <sys/types.h>
 #include <sys/socket.h>
+#include <sys/resource.h>
 #include <unistd.h>
 
 #ifndef ARRAY_SIZE
@@ -190,11 +191,14 @@
 	struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
 	struct sockaddr * const daddr =
 		new_loopback_sockaddr(p.send_family, p.recv_port);
-	const int fd = socket(p.send_family, p.protocol, 0);
+	const int fd = socket(p.send_family, p.protocol, 0), one = 1;
 
 	if (fd < 0)
 		error(1, errno, "failed to create send socket");
 
+	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
+		error(1, errno, "failed to set reuseaddr");
+
 	if (bind(fd, saddr, sockaddr_size()))
 		error(1, errno, "failed to bind send socket");
 
@@ -433,6 +437,21 @@
 	}
 }
 
+static struct rlimit rlim_old, rlim_new;
+
+static  __attribute__((constructor)) void main_ctor(void)
+{
+	getrlimit(RLIMIT_MEMLOCK, &rlim_old);
+	rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+	rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+	setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+}
+
+static __attribute__((destructor)) void main_dtor(void)
+{
+	setrlimit(RLIMIT_MEMLOCK, &rlim_old);
+}
+
 int main(void)
 {
 	fprintf(stderr, "---- IPv4 UDP ----\n");
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
index 35ade74..3ae77ba 100644
--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
+++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
@@ -135,6 +135,16 @@
 	return 0;
 }
 
+static int syscall_available(void)
+{
+	int rc;
+
+	errno = 0;
+	rc = syscall(__NR_subpage_prot, 0, 0, 0);
+
+	return rc == 0 || (errno != ENOENT && errno != ENOSYS);
+}
+
 int test_anon(void)
 {
 	unsigned long align;
@@ -145,6 +155,8 @@
 	void *mallocblock;
 	unsigned long mallocsize;
 
+	SKIP_IF(!syscall_available());
+
 	if (getpagesize() != 0x10000) {
 		fprintf(stderr, "Kernel page size must be 64K!\n");
 		return 1;
@@ -180,6 +192,8 @@
 	off_t filesize;
 	int fd;
 
+	SKIP_IF(!syscall_available());
+
 	fd = open(file_name, O_RDWR);
 	if (fd == -1) {
 		perror("failed to open file");
diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config
index 6a8e5a9..d148f9f 100644
--- a/tools/testing/selftests/pstore/config
+++ b/tools/testing/selftests/pstore/config
@@ -2,3 +2,4 @@
 CONFIG_PSTORE=y
 CONFIG_PSTORE_PMSG=y
 CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=m
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index f689981..d5be7b5 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1692,7 +1692,11 @@
 #endif
 
 #ifndef SECCOMP_FILTER_FLAG_TSYNC
-#define SECCOMP_FILTER_FLAG_TSYNC 1
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#endif
+
+#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
 #endif
 
 #ifndef seccomp
@@ -1791,6 +1795,78 @@
 	}
 }
 
+/*
+ * Test detection of known and unknown filter flags. Userspace needs to be able
+ * to check if a filter flag is supported by the current kernel and a good way
+ * of doing that is by attempting to enter filter mode, with the flag bit in
+ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
+ * that the flag is valid and EINVAL indicates that the flag is invalid.
+ */
+TEST(detect_seccomp_filter_flags)
+{
+	unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
+				 SECCOMP_FILTER_FLAG_SPEC_ALLOW };
+	unsigned int flag, all_flags;
+	int i;
+	long ret;
+
+	/* Test detection of known-good filter flags */
+	for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
+		int bits = 0;
+
+		flag = flags[i];
+		/* Make sure the flag is a single bit! */
+		while (flag) {
+			if (flag & 0x1)
+				bits ++;
+			flag >>= 1;
+		}
+		ASSERT_EQ(1, bits);
+		flag = flags[i];
+
+		ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+		ASSERT_NE(ENOSYS, errno) {
+			TH_LOG("Kernel does not support seccomp syscall!");
+		}
+		EXPECT_EQ(-1, ret);
+		EXPECT_EQ(EFAULT, errno) {
+			TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
+			       flag);
+		}
+
+		all_flags |= flag;
+	}
+
+	/* Test detection of all known-good filter flags */
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EFAULT, errno) {
+		TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+		       all_flags);
+	}
+
+	/* Test detection of an unknown filter flag */
+	flag = -1;
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno) {
+		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
+		       flag);
+	}
+
+	/*
+	 * Test detection of an unknown filter flag that may simply need to be
+	 * added to this test
+	 */
+	flag = flags[ARRAY_SIZE(flags) - 1] << 1;
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno) {
+		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
+		       flag);
+	}
+}
+
 TEST(TSYNC_first)
 {
 	struct sock_filter filter[] = {
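
The detect_seccomp_filter_flags test added above works because the kernel validates the flags argument before it touches the (NULL) filter pointer, so EFAULT means the flag is recognised and EINVAL means it is not. A minimal standalone probe using the same trick (assumes __NR_seccomp comes from <sys/syscall.h>; the local fallback defines are for older headers):

/* Minimal sketch of the flag-probing technique described in the test above:
 * seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL) fails with EFAULT when the
 * flag is recognised and EINVAL when it is not. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif

static int filter_flag_supported(unsigned long flag)
{
	errno = 0;
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, flag, NULL) == 0)
		return 1;              /* should not happen with NULL args */
	if (errno == EFAULT)
		return 1;              /* flag accepted, args rejected */
	return 0;                      /* EINVAL (unknown flag) or ENOSYS */
}

int main(void)
{
	printf("TSYNC supported: %d\n",
	       filter_flag_supported(SECCOMP_FILTER_FLAG_TSYNC));
	return 0;
}
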
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index 1c12536..18f5235 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -486,6 +486,7 @@
 int update_thermal_data()
 {
 	int i;
+	int next_thermal_record = cur_thermal_record + 1;
 	char tz_name[256];
 	static unsigned long samples;
 
@@ -495,9 +496,9 @@
 	}
 
 	/* circular buffer for keeping historic data */
-	if (cur_thermal_record >= NR_THERMAL_RECORDS)
-		cur_thermal_record = 0;
-	gettimeofday(&trec[cur_thermal_record].tv, NULL);
+	if (next_thermal_record >= NR_THERMAL_RECORDS)
+		next_thermal_record = 0;
+	gettimeofday(&trec[next_thermal_record].tv, NULL);
 	if (tmon_log) {
 		fprintf(tmon_log, "%lu ", ++samples);
 		fprintf(tmon_log, "%3.1f ", p_param.t_target);
@@ -507,11 +508,12 @@
 		snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
 			ptdata.tzi[i].instance);
 		sysfs_get_ulong(tz_name, "temp",
-				&trec[cur_thermal_record].temp[i]);
+				&trec[next_thermal_record].temp[i]);
 		if (tmon_log)
 			fprintf(tmon_log, "%lu ",
-				trec[cur_thermal_record].temp[i]/1000);
+				trec[next_thermal_record].temp[i] / 1000);
 	}
+	cur_thermal_record = next_thermal_record;
 	for (i = 0; i < ptdata.nr_cooling_dev; i++) {
 		char cdev_name[256];
 		unsigned long val;
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index 9aa1965..b43138f 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -336,7 +336,6 @@
 			show_data_w();
 			show_cooling_device();
 		}
-		cur_thermal_record++;
 		time_elapsed += ticktime;
 		controller_handler(trec[0].temp[target_tz_index] / 1000,
 				&yk);
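
The two tmon hunks stop advancing cur_thermal_record ahead of time: update_thermal_data() now fills the next ring slot completely and only then publishes it as the current record, so the display path never reads a half-filled sample. A standalone sketch of that fill-then-publish ring-buffer pattern (names and sizes are illustrative, not tmon's):

/* Standalone sketch of the fix above: compute the next ring slot, fill it
 * completely, and only then publish it as the current record, so any code
 * that reads ring[cur_record] never sees a partially filled sample. */
#include <stdio.h>

#define NR_RECORDS 4

struct record { long temp; long timestamp; };

static struct record ring[NR_RECORDS];
static int cur_record;            /* last fully written slot */

static void add_sample(long temp, long timestamp)
{
	int next = cur_record + 1;

	if (next >= NR_RECORDS)
		next = 0;

	ring[next].temp = temp;            /* fill the slot ...   */
	ring[next].timestamp = timestamp;
	cur_record = next;                 /* ... then publish it */
}

int main(void)
{
	for (long t = 0; t < 6; t++)
		add_sample(40000 + t, t);
	printf("current slot %d holds temp %ld\n",
	       cur_record, ring[cur_record].temp);
	return 0;
}
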
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 31f5625..1ebbf23 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -208,8 +208,8 @@
 	u8 prop;
 	int ret;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;
@@ -339,8 +339,9 @@
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-					     &pendmask, 1);
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
+						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
 				return ret;
@@ -628,7 +629,7 @@
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1152,8 +1153,8 @@
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eaae725..4f2a2df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1466,7 +1466,8 @@
 
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 			       unsigned long addr, bool *async,
-			       bool write_fault, kvm_pfn_t *p_pfn)
+			       bool write_fault, bool *writable,
+			       kvm_pfn_t *p_pfn)
 {
 	unsigned long pfn;
 	int r;
@@ -1492,6 +1493,8 @@
 
 	}
 
+	if (writable)
+		*writable = true;
 
 	/*
 	 * Get a reference here because callers of *hva_to_pfn* and
@@ -1557,7 +1560,7 @@
 	if (vma == NULL)
 		pfn = KVM_PFN_ERR_FAULT;
 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-		r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
+		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
 		if (r == -EAGAIN)
 			goto retry;
 		if (r < 0)